diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index e6b7df3ac4a25..f220567e883f8 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -57,6 +57,7 @@ BWC_VERSION:
   - "1.3.18"
   - "1.3.19"
   - "1.3.20"
+  - "1.3.21"
   - "2.0.0"
   - "2.0.1"
   - "2.0.2"
diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml
deleted file mode 100644
index ef842bb405d60..0000000000000
--- a/.github/workflows/dco.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Developer Certificate of Origin Check
-
-on: [pull_request]
-
-jobs:
-  dco-check:
-    runs-on: ubuntu-latest
-
-    steps:
-    - name: Get PR Commits
-      id: 'get-pr-commits'
-      uses: tim-actions/get-pr-commits@v1.3.1
-      with:
-        token: ${{ secrets.GITHUB_TOKEN }}
-    - name: DCO Check
-      uses: tim-actions/dco@v1.1.0
-      with:
-        commits: ${{ steps.get-pr-commits.outputs.commits }}
-
diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 3697750dab97a..923c82028cd1b 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -13,7 +13,7 @@ jobs:
       - uses: actions/checkout@v4
       - name: lychee Link Checker
         id: lychee
-        uses: lycheeverse/lychee-action@v2.1.0
+        uses: lycheeverse/lychee-action@v2.2.0
         with:
           args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes
           fail: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 54e5e8dbf11d8..a9d72ac5aa79d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,33 +14,58 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Make IndexStoreListener a pluggable interface ([#16583](https://github.com/opensearch-project/OpenSearch/pull/16583))
 - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600))
 - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489))
+- [Workload Management] Add Workload Management IT ([#16359](https://github.com/opensearch-project/OpenSearch/pull/16359))
 - Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271))
 - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)).
 - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/))
+- Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods ([#16923](https://github.com/opensearch-project/OpenSearch/pull/16923))
+- Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)).
+- Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678))
+- Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534))
+- Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
 - Bump `google-auth-library-oauth2-http` from 1.7.0 to 1.29.0 in /plugins/repository-gcs ([#16520](https://github.com/opensearch-project/OpenSearch/pull/16520))
-- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521))
+- Bump `com.azure:azure-storage-common` from 12.25.1 to 12.28.0 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521), [#16808](https://github.com/opensearch-project/OpenSearch/pull/16808))
 - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241105-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548), [#16613](https://github.com/opensearch-project/OpenSearch/pull/16613))
 - Bump `com.azure:azure-storage-common` from 12.25.1 to 12.27.1 ([#16521](https://github.com/opensearch-project/OpenSearch/pull/16521))
 - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501))
 - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550))
-- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.2.2 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612))
-- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.46 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611))
-- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610))
+- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.3.0 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612), [#16854](https://github.com/opensearch-project/OpenSearch/pull/16854))
+- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.1 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807), [#17011](https://github.com/opensearch-project/OpenSearch/pull/17011))
+- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897))
 - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614))
 - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655))
 - Bump `Netty` from 4.1.114.Final to 4.1.115.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661))
 - Bump `org.xerial.snappy:snappy-java` from 1.1.10.6 to 1.1.10.7 ([#16665](https://github.com/opensearch-project/OpenSearch/pull/16665))
 - Bump `codecov/codecov-action` from 4 to 5 ([#16667](https://github.com/opensearch-project/OpenSearch/pull/16667))
-- Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.2 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718))
+- Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.3 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718), [#16858](https://github.com/opensearch-project/OpenSearch/pull/16858))
 - Bump `jackson` from 2.17.2 to 2.18.2 ([#16733](https://github.com/opensearch-project/OpenSearch/pull/16733))
-- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.12 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716))
+- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.15 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716), [#16898](https://github.com/opensearch-project/OpenSearch/pull/16898))
 - Bump `com.azure:azure-identity` from 1.13.2 to 1.14.2 ([#16778](https://github.com/opensearch-project/OpenSearch/pull/16778))
+- Bump Apache Lucene from 9.12.0 to 9.12.1 ([#16846](https://github.com/opensearch-project/OpenSearch/pull/16846))
+- Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855))
+- Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857))
+- Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856))
+- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.21 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895), [#17010](https://github.com/opensearch-project/OpenSearch/pull/17010))
+- Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896))
+- Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918))
+- Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919))
+- Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951))
+- Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952))
+- Bump `opentelemetry` from 1.41.0 to 1.46.0 ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
+- Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
+- Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012))
+- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.17.0 ([#15580](https://github.com/opensearch-project/OpenSearch/pull/15580))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
+- Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707))
+- Sliced search only fans out to shards matched by the selected slice, reducing open search contexts ([#16771](https://github.com/opensearch-project/OpenSearch/pull/16771))
+- Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909))
+- Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881))
+- Limit reader writer separation to remote store enabled clusters [#16760](https://github.com/opensearch-project/OpenSearch/pull/16760)
 
 ### Deprecated
 - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712))
@@ -59,6 +84,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix stale cluster state custom file deletion ([#16670](https://github.com/opensearch-project/OpenSearch/pull/16670))
 - Bound the size of cache in deprecation logger ([16702](https://github.com/opensearch-project/OpenSearch/issues/16702))
 - [Tiered Caching] Fix bug in cache stats API ([#16560](https://github.com/opensearch-project/OpenSearch/pull/16560))
+- Ensure consistency of system flag on IndexMetadata after diff is applied ([#16644](https://github.com/opensearch-project/OpenSearch/pull/16644))
+- Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763))
+- Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606))
+- Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335))
+- Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
+- Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868))
+- Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732))
+- The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993))
 
 ### Security
 
diff --git a/build.gradle b/build.gradle
index 463dc80f22fb3..f8bc0431f0f62 100644
--- a/build.gradle
+++ b/build.gradle
@@ -127,8 +127,8 @@ subprojects {
           name = 'Snapshots'
          url = 'https://aws.oss.sonatype.org/content/repositories/snapshots'
          credentials {
-            username "$System.env.SONATYPE_USERNAME"
-            password "$System.env.SONATYPE_PASSWORD"
+            username = "$System.env.SONATYPE_USERNAME"
+            password = "$System.env.SONATYPE_PASSWORD"
          }
        }
      }
@@ -412,7 +412,7 @@ allprojects {
 gradle.projectsEvaluated {
   allprojects {
     project.tasks.withType(JavaForkOptions) {
-      maxHeapSize project.property('options.forkOptions.memoryMaximumSize')
+      maxHeapSize = project.property('options.forkOptions.memoryMaximumSize')
     }
 
     if (project.path == ':test:framework') {
@@ -728,7 +728,7 @@ tasks.named(JavaBasePlugin.CHECK_TASK_NAME) {
 }
 
 tasks.register('checkCompatibility', CheckCompatibilityTask) {
-  description('Checks the compatibility with child components')
+  description = 'Checks the compatibility with child components'
 }
 
 allprojects { project ->
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 552dd09fc3d9e..a3b3a8e278101 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -105,7 +105,7 @@ dependencies {
   api "org.apache.commons:commons-compress:${props.getProperty('commonscompress')}"
   api 'org.apache.ant:ant:1.10.14'
   api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0'
-  api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0'
+  api 'com.netflix.nebula:nebula-publishing-plugin:21.1.0'
   api 'com.netflix.nebula:gradle-info-plugin:12.1.6'
   api 'org.apache.rat:apache-rat:0.15'
   api "commons-io:commons-io:${props.getProperty('commonsio')}"
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy
index 7b3a0fc01ab65..6a7a011d08dc4 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy
@@ -30,6 +30,7 @@
 package org.opensearch.gradle
 
 import org.gradle.api.DefaultTask
+import org.gradle.api.Project
 import org.gradle.api.file.FileCollection
 import org.gradle.api.file.FileTree
 import org.gradle.api.file.SourceDirectorySet
@@ -39,6 +40,8 @@ import org.gradle.api.tasks.Optional
 import org.gradle.api.tasks.OutputFile
 import org.gradle.api.tasks.TaskAction
 
+import javax.inject.Inject
+
 import java.nio.file.Files
 import java.nio.file.attribute.PosixFilePermissions
 
@@ -58,8 +61,12 @@ class NoticeTask extends DefaultTask {
     /** Directories to include notices from */
     private List<File> licensesDirs = new ArrayList<>()
 
-    NoticeTask() {
-        description = 'Create a notice file from dependencies'
+    private final Project project
+
+    @Inject
+    NoticeTask(Project project) {
+        this.project = project
+        this.description = 'Create a notice file from dependencies'
         // Default licenses directory is ${projectDir}/licenses (if it exists)
         File licensesDir = new File(project.projectDir, 'licenses')
         if (licensesDir.exists()) {
@@ -161,11 +168,12 @@ class NoticeTask extends DefaultTask {
     @Optional
     FileCollection getNoticeFiles() {
         FileTree tree
+        def p = project
         licensesDirs.each { dir ->
             if (tree == null) {
-                tree = project.fileTree(dir)
+                tree = p.fileTree(dir)
             } else {
-                tree += project.fileTree(dir)
+                tree += p.fileTree(dir)
             }
         }
 
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy
index 2bd8835535881..9b687e1037a08 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/OptionalDependenciesPlugin.groovy
@@ -79,7 +79,7 @@ class OptionalDependenciesPlugin implements Plugin<Project> {
 
         if (foundDep) {
           if (foundDep.optional) {
-            foundDep.optional.value = 'true'
+            foundDep.optional*.value = 'true'
           } else {
             foundDep.appendNode(OPTIONAL_IDENTIFIER, 'true')
           }
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
index 13f5f8724c6f2..ad4bdb3258fcc 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -160,14 +160,14 @@ class PluginBuildPlugin implements Plugin<Project> {
                     archiveBaseName = archiveBaseName.get() + "-client"
                 }
                 // always configure publishing for client jars
-                project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client")
+                project.publishing.publications.nebula(MavenPublication).artifactId = extension.name + "-client"
                 final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class)
                 project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask ->
                     generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesName}-client-${project.versions.opensearch}.pom"
                 }
             } else {
                 if (project.plugins.hasPlugin(MavenPublishPlugin)) {
-                    project.publishing.publications.nebula(MavenPublication).artifactId(extension.name)
+                    project.publishing.publications.nebula(MavenPublication).artifactId = extension.name
                 }
             }
         }
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
index b8d0ed2b9c43c..e3f7469b527c8 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
@@ -32,6 +32,7 @@ import org.apache.rat.anttasks.Report
 import org.apache.rat.anttasks.SubstringLicenseMatcher
 import org.apache.rat.license.SimpleLicenseFamily
 import org.opensearch.gradle.AntTask
+import org.gradle.api.Project
 import org.gradle.api.file.FileCollection
 import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.InputFiles
@@ -41,6 +42,8 @@ import org.gradle.api.tasks.PathSensitive
 import org.gradle.api.tasks.PathSensitivity
 import org.gradle.api.tasks.SkipWhenEmpty
 
+import javax.inject.Inject
+
 import java.nio.file.Files
 
 /**
@@ -65,14 +68,18 @@ class LicenseHeadersTask extends AntTask {
     @Input
     List<String> excludes = []
 
+    private final Project project
+
     /**
     * Additional license families that may be found. The key is the license category name (5 characters),
     * followed by the family name and the value list of patterns to search for.
     */
    protected Map<String, List<String>> additionalLicenses = new HashMap<>()

-    LicenseHeadersTask() {
-        description = "Checks sources for missing, incorrect, or unacceptable license headers"
+    @Inject
+    LicenseHeadersTask(Project project) {
+        this.project = project
+        this.description = "Checks sources for missing, incorrect, or unacceptable license headers"
     }
 
     /**
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy
index 316db8aa01764..42db92fd83515 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy
@@ -30,12 +30,16 @@
 package org.opensearch.gradle.test
 
 import org.apache.tools.ant.taskdefs.condition.Os
+import org.gradle.api.Project
 import org.gradle.api.GradleException
 import org.gradle.api.tasks.Exec
 import org.gradle.api.tasks.Internal
 import org.gradle.api.tasks.TaskProvider
 import org.opensearch.gradle.AntTask
 import org.opensearch.gradle.LoggedExec
+
+import javax.inject.Inject
+
 /**
  * A fixture for integration tests which runs in a separate process launched by Ant.
 */
@@ -90,9 +94,12 @@ class AntFixture extends AntTask implements Fixture {
     }
 
     private final TaskProvider stopTask
+    private final Project project
 
-    AntFixture() {
-        stopTask = createStopTask()
+    @Inject
+    AntFixture(Project project) {
+        this.project = project
+        this.stopTask = createStopTask()
         finalizedBy(stopTask)
     }
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java
index 96d7c69699c68..36aa1f99aa894 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java
@@ -32,6 +32,7 @@
 package org.opensearch.gradle;
 
 import org.gradle.api.DefaultTask;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.Input;
 import org.gradle.api.tasks.Internal;
 import org.gradle.api.tasks.TaskAction;
@@ -48,6 +49,12 @@ public class EmptyDirTask extends DefaultTask {
 
     private File dir;
     private int dirMode = 0755;
+    private final Project project;
+
+    @Inject
+    public EmptyDirTask(Project project) {
+        this.project = project;
+    }
 
     /**
      * Creates an empty directory with the configured permissions.
@@ -84,7 +91,7 @@ public void setDir(File dir) {
      * @param dir The path of the directory to create. Takes a String and coerces it to a file.
      */
     public void setDir(String dir) {
-        this.dir = getProject().file(dir);
+        this.dir = project.file(dir);
     }
 
     @Input
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java
index d00e790c94fcc..072b6fa788cbd 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java
@@ -33,6 +33,7 @@
 
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.DirectoryProperty;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
@@ -42,6 +43,8 @@
 import org.gradle.api.tasks.StopExecutionException;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -67,8 +70,9 @@ public class ExportOpenSearchBuildResourcesTask extends DefaultTask {
 
     private DirectoryProperty outputDir;
 
-    public ExportOpenSearchBuildResourcesTask() {
-        outputDir = getProject().getObjects().directoryProperty();
+    @Inject
+    public ExportOpenSearchBuildResourcesTask(Project project) {
+        outputDir = project.getObjects().directoryProperty();
     }
 
     @OutputDirectory
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java
index 4c62f4a6b4ee8..3557ef6ef3df7 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java
@@ -70,6 +70,7 @@ public class LoggedExec extends Exec implements FileSystemOperationsAware {
     private static final Logger LOGGER = Logging.getLogger(LoggedExec.class);
     private Consumer<Logger> outputLogger;
     private FileSystemOperations fileSystemOperations;
+    private final Project project;
 
     interface InjectedExecOps {
         @Inject
         ExecOperations getExecOps();
     }
 
     @Inject
-    public LoggedExec(FileSystemOperations fileSystemOperations) {
+    public LoggedExec(FileSystemOperations fileSystemOperations, Project project) {
         this.fileSystemOperations = fileSystemOperations;
+        this.project = project;
         if (getLogger().isInfoEnabled() == false) {
             setIgnoreExitValue(true);
             setSpoolOutput(false);
@@ -111,7 +113,7 @@ public void execute(Task task) {
     public void setSpoolOutput(boolean spoolOutput) {
         final OutputStream out;
         if (spoolOutput) {
-            File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName());
+            File spoolFile = new File(project.getBuildDir() + "/buffered-output/" + this.getName());
             out = new LazyFileOutputStream(spoolFile);
             outputLogger = logger -> {
                 try {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java
index 08f0e7488a43c..94a8592d9bc2f 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java
@@ -34,6 +34,7 @@
 import org.opensearch.gradle.LoggedExec;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.DirectoryProperty;
 import org.gradle.api.file.RegularFileProperty;
 import org.gradle.api.logging.Logger;
@@ -60,18 +61,22 @@ public class DockerBuildTask extends DefaultTask {
     private static final Logger LOGGER = Logging.getLogger(DockerBuildTask.class);
 
     private final WorkerExecutor workerExecutor;
-    private final RegularFileProperty markerFile = getProject().getObjects().fileProperty();
-    private final DirectoryProperty dockerContext = getProject().getObjects().directoryProperty();
+    private final RegularFileProperty markerFile;
+    private final DirectoryProperty dockerContext;
 
     private String[] tags;
     private boolean pull = true;
     private boolean noCache = true;
     private String[] baseImages;
+    private final Project project;
 
     @Inject
-    public DockerBuildTask(WorkerExecutor workerExecutor) {
+    public DockerBuildTask(WorkerExecutor workerExecutor, Project project) {
         this.workerExecutor = workerExecutor;
-        this.markerFile.set(getProject().getLayout().getBuildDirectory().file("markers/" + this.getName() + ".marker"));
+        this.project = project;
+        this.markerFile = project.getObjects().fileProperty();
+        this.dockerContext = project.getObjects().directoryProperty();
+        this.markerFile.set(project.getLayout().getBuildDirectory().file("markers/" + this.getName() + ".marker"));
     }
 
     @TaskAction
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java
index 77d7997d6d48d..b75bdcffb257b 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java
@@ -105,8 +105,7 @@ public DockerAvailability getDockerAvailability() {
             Result lastResult = null;
             Version version = null;
             boolean isVersionHighEnough = false;
-            boolean isComposeAvailable = false;
-            boolean isComposeV2Available = false;
+            DockerComposeAvailability dockerComposeAvailability = null;
 
             // Check if the Docker binary exists
             final Optional<String> dockerBinary = getDockerPath();
@@ -114,7 +113,7 @@ public DockerAvailability getDockerAvailability() {
                 dockerPath = dockerBinary.get();
 
                 // Since we use a multi-stage Docker build, check the Docker version meets minimum requirement
-                lastResult = runCommand(dockerPath, "version", "--format", "{{.Server.Version}}");
+                lastResult = runCommand(execOperations, dockerPath, "version", "--format", "{{.Server.Version}}");
 
                 if (lastResult.isSuccess()) {
                     version = Version.fromString(lastResult.stdout.trim(), Version.Mode.RELAXED);
 
@@ -123,15 +122,11 @@ public DockerAvailability getDockerAvailability() {
 
                     if (isVersionHighEnough) {
                         // Check that we can execute a privileged command
-                        lastResult = runCommand(dockerPath, "images");
-
+                        lastResult = runCommand(execOperations, dockerPath, "images");
                         // If docker all checks out, see if docker-compose is available and working
-                        Optional<String> composePath = getDockerComposePath();
-                        if (lastResult.isSuccess() && composePath.isPresent()) {
-                            isComposeAvailable = runCommand(composePath.get(), "version").isSuccess();
+                        if (lastResult.isSuccess()) {
+                            dockerComposeAvailability = DockerComposeAvailability.detect(execOperations, dockerPath).orElse(null);
                         }
-
-                        isComposeV2Available = runCommand(dockerPath, "compose", "version").isSuccess();
                     }
                 }
             }
@@ -140,8 +135,7 @@ public DockerAvailability getDockerAvailability() {
 
             this.dockerAvailability = new DockerAvailability(
                 isAvailable,
-                isComposeAvailable,
-                isComposeV2Available,
+                dockerComposeAvailability,
                 isVersionHighEnough,
                 dockerPath,
                 version,
@@ -291,17 +285,6 @@ private Optional<String> getDockerPath() {
         return Arrays.asList(DOCKER_BINARIES).stream().filter(path -> new File(path).exists()).findFirst();
     }
 
-    /**
-     * Searches the entries in {@link #DOCKER_COMPOSE_BINARIES} for the Docker Compose CLI. This method does
-     * not check whether the installation appears usable, see {@link #getDockerAvailability()} instead.
-     *
-     * @return the path to a CLI, if available.
-     */
-    private Optional<String> getDockerComposePath() {
-        // Check if the Docker binary exists
-        return Arrays.asList(DOCKER_COMPOSE_BINARIES).stream().filter(path -> new File(path).exists()).findFirst();
-    }
-
     private void throwDockerRequiredException(final String message) {
         throwDockerRequiredException(message, null);
     }
@@ -321,7 +304,7 @@ private void throwDockerRequiredException(final String message, Exception e) {
     * while running the command, or the process was killed after reaching the 10s timeout,
     * then the exit code will be -1.
     */
-    private Result runCommand(String... args) {
+    private static Result runCommand(ExecOperations execOperations, String... args) {
        if (args.length == 0) {
            throw new IllegalArgumentException("Cannot execute with no command");
        }
@@ -356,14 +339,9 @@ public static class DockerAvailability {
        public final boolean isAvailable;
 
        /**
-         * True if docker-compose is available.
+         * Non-null if docker-compose v1 or v2 is available.
         */
-        public final boolean isComposeAvailable;
-
-        /**
-         * True if docker compose is available.
-         */
-        public final boolean isComposeV2Available;
+        public final DockerComposeAvailability dockerComposeAvailability;
 
        /**
         * True if the installed Docker version is >= 17.05
@@ -387,23 +365,70 @@ public static class DockerAvailability {
 
        DockerAvailability(
            boolean isAvailable,
-            boolean isComposeAvailable,
-            boolean isComposeV2Available,
+            DockerComposeAvailability dockerComposeAvailability,
            boolean isVersionHighEnough,
            String path,
            Version version,
            Result lastCommand
        ) {
            this.isAvailable = isAvailable;
-            this.isComposeAvailable = isComposeAvailable;
-            this.isComposeV2Available = isComposeV2Available;
+            this.dockerComposeAvailability = dockerComposeAvailability;
            this.isVersionHighEnough = isVersionHighEnough;
            this.path = path;
            this.version = version;
            this.lastCommand = lastCommand;
        }
+
+        public boolean isDockerComposeAvailable() {
+            return dockerComposeAvailability != null;
+        }
+    }
+
+    /**
+     * Marker interface for Docker Compose availability
+     */
+    private interface DockerComposeAvailability {
+        /**
+         * Detects Docker Compose V1/V2 availability
+         */
+        private static Optional<DockerComposeAvailability> detect(ExecOperations execOperations, String dockerPath) {
+            Optional<String> composePath = getDockerComposePath();
+            if (composePath.isPresent()) {
+                if (runCommand(execOperations, composePath.get(), "version").isSuccess()) {
+                    return Optional.of(new DockerComposeV1Availability());
+                }
+            }
+
+            if (runCommand(execOperations, dockerPath, "compose", "version").isSuccess()) {
+                return Optional.of(new DockerComposeV2Availability());
+            }
+
+            return Optional.empty();
+        }
+
+        /**
+         * Searches the entries in {@link #DOCKER_COMPOSE_BINARIES} for the Docker Compose CLI. This method does
+         * not check whether the installation appears usable, see {@link #getDockerAvailability()} instead.
+         *
+         * @return the path to a CLI, if available.
+         */
+        private static Optional<String> getDockerComposePath() {
+            // Check if the Docker binary exists
+            return Arrays.asList(DOCKER_COMPOSE_BINARIES).stream().filter(path -> new File(path).exists()).findFirst();
+        }
+    }
+
+    /**
+     * Docker Compose V1 availability
+     */
+    public static class DockerComposeV1Availability implements DockerComposeAvailability {}
+
+    /**
+     * Docker Compose V2 availability
+     */
+    public static class DockerComposeV2Availability implements DockerComposeAvailability {}
+
    /**
     * This class models the result of running a command. It captures the exit code, standard output and standard error.
     */
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
index 7248e0bc14431..337ac5d62c3fd 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
@@ -36,6 +36,7 @@
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
 import org.gradle.api.InvalidUserDataException;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
@@ -48,6 +49,8 @@
 import org.gradle.api.tasks.OutputDirectory;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
@@ -127,7 +130,7 @@ public class DependencyLicensesTask extends DefaultTask {
    /**
     * The directory to find the license and sha files in.
    */
-    private File licensesDir = new File(getProject().getProjectDir(), "licenses");
+    private File licensesDir;
 
    /**
     * A map of patterns to prefix, used to find the LICENSE and NOTICE file.
@@ -139,6 +142,14 @@ public class DependencyLicensesTask extends DefaultTask {
     */
    private Set<String> ignoreShas = new HashSet<>();
 
+    private final Project project;
+
+    @Inject
+    public DependencyLicensesTask(Project project) {
+        this.project = project;
+        this.licensesDir = new File(project.getProjectDir(), "licenses");
+    }
+
    /**
     * Add a mapping from a regex pattern for the jar name, to a prefix to find
     * the LICENSE and NOTICE file for that jar.
@@ -161,7 +172,7 @@ public void mapping(Map<String, String> props) {
    @InputFiles
    public Property<FileCollection> getDependencies() {
        if (dependenciesProvider == null) {
-            dependenciesProvider = getProject().getObjects().property(FileCollection.class);
+            dependenciesProvider = project.getObjects().property(FileCollection.class);
        }
        return dependenciesProvider;
    }
@@ -250,7 +261,7 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException {
    // by this output but when successful we can safely mark the task as up-to-date.
    @OutputDirectory
    public File getOutputMarker() {
-        return new File(getProject().getBuildDir(), "dependencyLicense");
+        return new File(project.getBuildDir(), "dependencyLicense");
    }
 
    private void failIfAnyMissing(String item, Boolean exists, String type) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
index 2c17666d8ee0c..0e5276bfdf033 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
@@ -35,6 +35,7 @@
 import org.opensearch.gradle.util.GradleUtils;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.tasks.IgnoreEmptyDirectories;
@@ -48,6 +49,8 @@
 import org.gradle.api.tasks.util.PatternFilterable;
 import org.gradle.api.tasks.util.PatternSet;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
@@ -71,10 +74,14 @@ public class FilePermissionsTask extends DefaultTask {
        // exclude sh files that might have the executable bit set
        .exclude("**/*.sh");
 
-    private File outputMarker = new File(getProject().getBuildDir(), "markers/filePermissions");
+    private final File outputMarker;
+    private final Project project;
 
-    public FilePermissionsTask() {
+    @Inject
+    public FilePermissionsTask(Project project) {
        setDescription("Checks java source files for correct file permissions");
+        this.project = project;
+        this.outputMarker = new File(project.getBuildDir(), "markers/filePermissions");
    }
 
    private static boolean isExecutableFile(File file) {
@@ -98,11 +105,11 @@ private static boolean isExecutableFile(File file) {
    @IgnoreEmptyDirectories
    @PathSensitive(PathSensitivity.RELATIVE)
    public FileCollection getFiles() {
-        return GradleUtils.getJavaSourceSets(getProject())
+        return GradleUtils.getJavaSourceSets(project)
            .stream()
            .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter))
            .reduce(FileTree::plus)
-            .orElse(getProject().files().getAsFileTree());
+            .orElse(project.files().getAsFileTree());
    }
 
    @TaskAction
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
index 6ef1e77f5138f..1790b32fb2f36 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
@@ -34,6 +34,7 @@
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
 import org.gradle.api.InvalidUserDataException;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.plugins.JavaPluginExtension;
@@ -48,6 +49,8 @@
 import org.gradle.api.tasks.util.PatternFilterable;
 import org.gradle.api.tasks.util.PatternSet;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -89,8 +92,10 @@ public class ForbiddenPatternsTask extends DefaultTask {
     * The rules: a map from the rule name, to a rule regex pattern.
     */
    private final Map<String, String> patterns = new HashMap<>();
+    private final Project project;
 
-    public ForbiddenPatternsTask() {
+    @Inject
+    public ForbiddenPatternsTask(Project project) {
        setDescription("Checks source files for invalid patterns like nocommits or tabs");
        getInputs().property("excludes", filesFilter.getExcludes());
        getInputs().property("rules", patterns);
@@ -99,6 +104,8 @@ public ForbiddenPatternsTask() {
        patterns.put("nocommit", "nocommit|NOCOMMIT");
        patterns.put("nocommit should be all lowercase or all uppercase", "((?i)nocommit)(?<!(nocommit|NOCOMMIT))");
        patterns.put("tab", "\t");
+
+        this.project = project;
    }
 
@@ -115,11 +122,11 @@ public ForbiddenPatternsTask() {
    @InputFiles
    @PathSensitive(PathSensitivity.RELATIVE)
    public FileCollection getFiles() {
-        return getProject().getExtensions()
+        return project.getExtensions()
            .getByType(JavaPluginExtension.class)
            .getSourceSets()
            .stream()
            .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter))
            .reduce(FileTree::plus)
-            .orElse(getProject().files().getAsFileTree());
+            .orElse(project.files().getAsFileTree());
    }
 
    @TaskAction
@@ -131,7 +138,7 @@ public void checkInvalidPatterns() throws IOException {
                .boxed()
                .collect(Collectors.toList());
 
-            String path = getProject().getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString();
+            String path = project.getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString();
            failures.addAll(
                invalidLines.stream()
                    .map(l -> new AbstractMap.SimpleEntry<>(l + 1, lines.get(l)))
@@ -155,7 +162,7 @@ public void checkInvalidPatterns() throws IOException {
 
    @OutputFile
    public File getOutputMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + getName());
+        return new File(project.getBuildDir(), "markers/" + getName());
    }
 
    @Input
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java
index 7726133562e9f..ebe0b25a3a685 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java
@@ -33,11 +33,14 @@
 package org.opensearch.gradle.precommit;
 
 import org.opensearch.gradle.LoggedExec;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.tasks.CacheableTask;
 import org.gradle.api.tasks.CompileClasspath;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 
 /**
@@ -47,14 +50,18 @@ public class JarHellTask extends PrecommitTask {
 
    private FileCollection classpath;
+    private final Project project;
 
-    public JarHellTask() {
+    @Inject
+    public JarHellTask(Project project) {
+        super(project);
        setDescription("Runs CheckJarHell on the configured classpath");
+        this.project = project;
    }
 
    @TaskAction
    public void runJarHellCheck() {
-        LoggedExec.javaexec(getProject(), spec -> {
+        LoggedExec.javaexec(project, spec -> {
            spec.environment("CLASSPATH", getClasspath().getAsPath());
            spec.getMainClass().set("org.opensearch.bootstrap.JarHell");
        });
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
index db215fb65ef95..70acdcc26c212 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
@@ -33,6 +33,7 @@
 package org.opensearch.gradle.precommit;
 
 import org.opensearch.gradle.LoggedExec;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.plugins.JavaPluginExtension;
 import org.gradle.api.tasks.CacheableTask;
@@ -45,6 +46,8 @@
 import org.gradle.api.tasks.SourceSet;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 
 /**
@@ -54,14 +57,18 @@ public class LoggerUsageTask extends PrecommitTask {
 
    private FileCollection classpath;
+    private final Project project;
 
-    public LoggerUsageTask() {
+    @Inject
+    public LoggerUsageTask(Project project) {
+        super(project);
        setDescription("Runs LoggerUsageCheck on output directories of all source sets");
+        this.project = project;
    }
 
    @TaskAction
    public void runLoggerUsageTask() {
-        LoggedExec.javaexec(getProject(), spec -> {
+        LoggedExec.javaexec(project, spec -> {
            spec.getMainClass().set("org.opensearch.test.loggerusage.OpenSearchLoggerUsageChecker");
            spec.classpath(getClasspath());
            getClassDirectories().forEach(spec::args);
@@ -82,7 +89,7 @@ public void setClasspath(FileCollection classpath) {
    @SkipWhenEmpty
    @IgnoreEmptyDirectories
    public FileCollection getClassDirectories() {
-        return getProject().getExtensions()
+        return project.getExtensions()
            .getByType(JavaPluginExtension.class)
            .getSourceSets()
            .stream()
@@ -93,7 +100,7 @@ public FileCollection getClassDirectories() {
            )
            .map(sourceSet -> sourceSet.getOutput().getClassesDirs())
            .reduce(FileCollection::plus)
-            .orElse(getProject().files())
+            .orElse(project.files())
            .filter(File::exists);
    }
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java
index b76e0d6dd93cf..f7dea88cb2e30 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java
@@ -35,10 +35,13 @@
 import org.apache.maven.model.Model;
 import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.RegularFileProperty;
 import org.gradle.api.tasks.InputFile;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.FileReader;
 import java.util.Collection;
 import java.util.function.Consumer;
@@ -46,10 +49,16 @@
 
 public class PomValidationTask extends PrecommitTask {
 
-    private final RegularFileProperty pomFile = getProject().getObjects().fileProperty();
+    private final RegularFileProperty pomFile;
 
    private boolean foundError;
 
+    @Inject
+    public PomValidationTask(Project project) {
+        super(project);
+        this.pomFile = project.getObjects().fileProperty();
+    }
+
    @InputFile
    public RegularFileProperty getPomFile() {
        return pomFile;
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java
index 52646206e4792..670614aa48087 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java
@@ -32,19 +32,28 @@
 package org.opensearch.gradle.precommit;
 
 import org.gradle.api.DefaultTask;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.OutputFile;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.StandardOpenOption;
 
 public class PrecommitTask extends DefaultTask {
+    private final Project project;
+
+    @Inject
+    public PrecommitTask(Project project) {
+        this.project = project;
+    }
 
    @OutputFile
    public File getSuccessMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + this.getName());
+        return new File(project.getBuildDir(), "markers/" + this.getName());
    }
 
    @TaskAction
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java
index d66b1f9d25cdd..9c1285914a03e 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java
@@ -38,6 +38,7 @@
 import org.opensearch.gradle.util.Util;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.NamedDomainObjectContainer;
+import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
@@ -85,12 +86,15 @@ public class TestingConventionsTasks extends DefaultTask {
 
    private Map<String, File> testClassNames;
 
    private final NamedDomainObjectContainer<TestingConventionRule> naming;
+    private final Project project;
 
-    public TestingConventionsTasks() {
+    @Inject
+    public TestingConventionsTasks(Project project) {
        setDescription("Tests various testing conventions");
        // Run only after everything is compiled
-        GradleUtils.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs()));
-        naming = getProject().container(TestingConventionRule.class);
+        GradleUtils.getJavaSourceSets(project).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs()));
+        this.naming = project.container(TestingConventionRule.class);
+        this.project = project;
    }
 
    @Inject
@@ -100,38 +104,34 @@ protected Factory<PatternSet> getPatternSetFactory() {
 
    @Input
    public Map<String, Set<File>> getClassFilesPerEnabledTask() {
-        return getProject().getTasks()
-            .withType(Test.class)
-            .stream()
-            .filter(Task::getEnabled)
-            .collect(Collectors.toMap(Task::getPath, task -> {
-                // See please https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath
-                final JvmTestSuite jvmTestSuite = JvmTestSuiteHelper.getDefaultTestSuite(getProject()).orElse(null);
-                if (jvmTestSuite != null) {
-                    final PatternFilterable patternSet = getPatternSetFactory().create()
-                        .include(task.getIncludes())
-                        .exclude(task.getExcludes());
-
-                    final Set<File> files = jvmTestSuite.getSources()
-                        .getOutput()
-                        .getClassesDirs()
-                        .getAsFileTree()
-                        .matching(patternSet)
-                        .getFiles();
-
-                    if (!files.isEmpty()) {
-                        return files;
-                    }
+        return project.getTasks().withType(Test.class).stream().filter(Task::getEnabled).collect(Collectors.toMap(Task::getPath, task -> {
+            // See please https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath
+            final JvmTestSuite jvmTestSuite = JvmTestSuiteHelper.getDefaultTestSuite(project).orElse(null);
+            if (jvmTestSuite != null) {
+                final PatternFilterable patternSet = getPatternSetFactory().create()
+                    .include(task.getIncludes())
+                    .exclude(task.getExcludes());
+
+                final Set<File> files = jvmTestSuite.getSources()
+                    .getOutput()
+                    .getClassesDirs()
+                    .getAsFileTree()
+                    .matching(patternSet)
+                    .getFiles();
+
+                if (!files.isEmpty()) {
+                    return files;
                }
+            }
 
-                return task.getCandidateClassFiles().getFiles();
-            }));
+            return task.getCandidateClassFiles().getFiles();
+        }));
    }
 
    @Input
    public Map<String, File> getTestClassNames() {
        if (testClassNames == null) {
-            testClassNames = Util.getJavaTestSourceSet(getProject())
+            testClassNames = Util.getJavaTestSourceSet(project)
                .get()
                .getOutput()
                .getClassesDirs()
@@ -151,7 +151,7 @@ public NamedDomainObjectContainer<TestingConventionRule> getNaming() {
 
    @OutputFile
    public File getSuccessMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + getName());
+        return new File(project.getBuildDir(), "markers/" + getName());
    }
 
    public void naming(Closure action) {
@@ -160,7 +160,7 @@ public void naming(Closure action) {
 
    @Input
    public Set<String> getMainClassNamedLikeTests() {
-        SourceSetContainer javaSourceSets = GradleUtils.getJavaSourceSets(getProject());
+        SourceSetContainer javaSourceSets = GradleUtils.getJavaSourceSets(project);
        if (javaSourceSets.findByName(SourceSet.MAIN_SOURCE_SET_NAME) == null) {
            // some test projects don't have a main source set
            return Collections.emptySet();
@@ -195,7 +195,7 @@ public void doCheck() throws IOException {
                .stream()
                .collect(Collectors.toMap(Map.Entry::getValue, entry -> loadClassWithoutInitializing(entry.getKey(), isolatedClassLoader)));
 
-            final FileTree allTestClassFiles = getProject().files(
+            final FileTree allTestClassFiles = project.files(
                classes.values()
                    .stream()
                    .filter(isStaticClass.negate())
@@ -207,7 +207,7 @@ public void doCheck() throws IOException {
 
            final Map<String, Set<File>> classFilesPerTask = getClassFilesPerEnabledTask();
 
-            final Set<File> testSourceSetFiles = Util.getJavaTestSourceSet(getProject()).get().getRuntimeClasspath().getFiles();
+            final Set<File> testSourceSetFiles = Util.getJavaTestSourceSet(project).get().getRuntimeClasspath().getFiles();
            final Map<String, Set<Class<?>>> testClassesPerTask = classFilesPerTask.entrySet()
                .stream()
                .filter(entry -> testSourceSetFiles.containsAll(entry.getValue()))
@@ -398,7 +398,7 @@ private boolean isAnnotated(Method method, Class<?> annotation) {
 
    @Classpath
    public FileCollection getTestsClassPath() {
-        return Util.getJavaTestSourceSet(getProject()).get().getRuntimeClasspath();
+        return Util.getJavaTestSourceSet(project).get().getRuntimeClasspath();
    }
 
    private Map<String, File> walkPathAndLoadClasses(File testRoot) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
index 26d205eaf01d8..ab6201e02b37b 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -40,6 +40,7 @@
 import org.opensearch.gradle.util.GradleUtils;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.JavaVersion;
+import org.gradle.api.Project;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.artifacts.Dependency;
 import org.gradle.api.file.FileCollection;
@@ -107,7 +108,15 @@ public class ThirdPartyAuditTask extends DefaultTask {
 
    private FileCollection jdkJarHellClasspath;
 
-    private final Property<JavaVersion> targetCompatibility = getProject().getObjects().property(JavaVersion.class);
+    private final Project project;
+
+    private final Property<JavaVersion> targetCompatibility;
+
+    @Inject
+    public ThirdPartyAuditTask(Project project) {
+        this.project = project;
+        this.targetCompatibility = project.getObjects().property(JavaVersion.class);
+    }
 
    public boolean jarHellEnabled = true;
 
@@ -124,7 +133,7 @@ public Property<JavaVersion> getTargetCompatibility() {
    @InputFiles
    @PathSensitive(PathSensitivity.NAME_ONLY)
    public Configuration getForbiddenAPIsConfiguration() {
-        return getProject().getConfigurations().getByName("forbiddenApisCliJar");
+        return project.getConfigurations().getByName("forbiddenApisCliJar");
    }
 
    @InputFile
@@ -149,12 +158,12 @@ public void setJavaHome(String javaHome) {
 
    @Internal
    public File getJarExpandDir() {
-        return new File(new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), getName());
+        return new File(new File(project.getBuildDir(), "precommit/thirdPartyAudit"), getName());
    }
 
    @OutputFile
    public File getSuccessMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + getName());
+        return new File(project.getBuildDir(), "markers/" + getName());
    }
 
    // We use compile classpath normalization here because class implementation changes are irrelevant for the purposes of jdk jar hell.
@@ -213,10 +222,10 @@ public Set<File> getJarsToScan() {
        // err on the side of scanning these to make sure we don't miss anything
        Spec<Dependency> reallyThirdParty = dep -> dep.getGroup() != null && dep.getGroup().startsWith("org.opensearch") == false;
 
-        Set<File> jars = GradleUtils.getFiles(getProject(), getRuntimeConfiguration(), reallyThirdParty).getFiles();
+        Set<File> jars = GradleUtils.getFiles(project, getRuntimeConfiguration(), reallyThirdParty).getFiles();
        Set<File> compileOnlyConfiguration = GradleUtils.getFiles(
-            getProject(),
-            getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME),
+            project,
+            project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME),
            reallyThirdParty
        ).getFiles();
        // don't scan provided dependencies that we already scanned, e.x. don't scan cores dependencies for every plugin
@@ -229,8 +238,7 @@ public Set<File> getJarsToScan() {
    @TaskAction
    public void runThirdPartyAudit() throws IOException {
        Set<File> jars = getJarsToScan();
-
-        extractJars(jars);
+        Set<File> extractedJars = extractJars(jars);
 
        final String forbiddenApisOutput = runForbiddenAPIsCli();
 
@@ -248,7 +256,7 @@ public void runThirdPartyAudit() throws IOException {
 
        Set<String> jdkJarHellClasses = null;
        if (this.jarHellEnabled) {
-            jdkJarHellClasses = runJdkJarHellCheck();
+            jdkJarHellClasses = runJdkJarHellCheck(extractedJars);
        }
 
        if (missingClassExcludes != null) {
@@ -302,16 +310,26 @@ private void logForbiddenAPIsOutput(String forbiddenApisOutput) {
        getLogger().error("Forbidden APIs output:\n{}==end of forbidden APIs==", forbiddenApisOutput);
    }
 
-    private void extractJars(Set<File> jars) {
+    /**
+     * Extract project jars to build directory as specified by getJarExpandDir.
+     * Handle multi release jars by keeping versions closest to `targetCompatibility` version.
+     * @param jars to extract to build dir
+     * @return File set of extracted jars
+     */
+    private Set<File> extractJars(Set<File> jars) {
+        Set<File> extractedJars = new TreeSet<>();
        File jarExpandDir = getJarExpandDir();
        // We need to clean up to make sure old dependencies don't linger
-        getProject().delete(jarExpandDir);
+        project.delete(jarExpandDir);
 
        jars.forEach(jar -> {
-            FileTree jarFiles = getProject().zipTree(jar);
-            getProject().copy(spec -> {
+            String jarPrefix = jar.getName().replace(".jar", "");
+            File jarSubDir = new File(jarExpandDir, jarPrefix);
+            extractedJars.add(jarSubDir);
+            FileTree jarFiles = project.zipTree(jar);
+            project.copy(spec -> {
                spec.from(jarFiles);
-                spec.into(jarExpandDir);
+                spec.into(jarSubDir);
                // exclude classes from multi release jars
                spec.exclude("META-INF/versions/**");
            });
@@ -328,9 +346,9 @@ private void extractJars(Set<File> jars) {
            IntStream.rangeClosed(
                Integer.parseInt(JavaVersion.VERSION_1_9.getMajorVersion()),
                Integer.parseInt(targetCompatibility.get().getMajorVersion())
-            ).forEach(majorVersion -> getProject().copy(spec -> {
-                spec.from(getProject().zipTree(jar));
-                spec.into(jarExpandDir);
+            ).forEach(majorVersion -> project.copy(spec -> {
+                spec.from(project.zipTree(jar));
+                spec.into(jarSubDir);
                String metaInfPrefix = "META-INF/versions/" + majorVersion;
                spec.include(metaInfPrefix + "/**");
                // Drop the version specific prefix
@@ -338,6 +356,8 @@ private void extractJars(Set<File> jars) {
                spec.setIncludeEmptyDirs(false);
            }));
        });
+
+        return extractedJars;
    }
 
    private void assertNoJarHell(Set<String> jdkJarHellClasses) {
@@ -366,7 +386,7 @@ private String formatClassList(Set<String> classList) {
 
    private String runForbiddenAPIsCli() throws IOException {
        ByteArrayOutputStream errorOut = new ByteArrayOutputStream();
-        InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class);
+        InjectedExecOps execOps = project.getObjects().newInstance(InjectedExecOps.class);
        ExecResult result = execOps.getExecOps().javaexec(spec -> {
            if (javaHome != null) {
                spec.setExecutable(javaHome + "/bin/java");
@@ -374,7 +394,7 @@ private String runForbiddenAPIsCli() throws IOException {
            spec.classpath(
                getForbiddenAPIsConfiguration(),
                getRuntimeConfiguration(),
-                getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
+                project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
            );
            spec.jvmArgs("-Xmx1g");
            spec.jvmArgs(LoggedExec.shortLivedArgs());
@@ -399,18 +419,22 @@ private String runForbiddenAPIsCli() throws IOException {
        return forbiddenApisOutput;
    }
 
-    private Set<String> runJdkJarHellCheck() throws IOException {
+    /**
+     * Execute java with JDK_JAR_HELL_MAIN_CLASS against provided jars with OpenSearch core in the classpath.
+     * @param jars to scan for jarHell violations.
+     * @return standard out of jarHell process.
+ */ + private Set<String> runJdkJarHellCheck(Set<File> jars) throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); - InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class); + InjectedExecOps execOps = project.getObjects().newInstance(InjectedExecOps.class); ExecResult execResult = execOps.getExecOps().javaexec(spec -> { spec.classpath( jdkJarHellClasspath, getRuntimeConfiguration(), - getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) + project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) ); - spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS); - spec.args(getJarExpandDir()); + spec.args(jars); spec.setIgnoreExitValue(true); if (javaHome != null) { spec.setExecutable(javaHome + "/bin/java"); } @@ -428,9 +452,9 @@ private Set<String> runJdkJarHellCheck() throws IOException { } private Configuration getRuntimeConfiguration() { - Configuration runtime = getProject().getConfigurations().findByName("runtimeClasspath"); + Configuration runtime = project.getConfigurations().findByName("runtimeClasspath"); if (runtime == null) { - return getProject().getConfigurations().getByName("testCompileClasspath"); + return project.getConfigurations().getByName("testCompileClasspath"); } return runtime; } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java index aff9198e15772..4bdc75457ba75 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java @@ -192,6 +192,10 @@ public Destination getDestination() { public String getMessage() { return message; } + + public long getLogTime() { + return System.currentTimeMillis(); + } }); } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java index fa417da1a1007..caac3ede98588 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java @@ -34,9 +34,12 @@ import org.opensearch.gradle.vagrant.VagrantMachine; import org.opensearch.gradle.vagrant.VagrantShellTask; +import org.gradle.api.Project; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.options.Option; +import javax.inject.Inject; + import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -49,6 +52,13 @@ public class GradleDistroTestTask extends VagrantShellTask { private String taskName; private String testClass; private List<String> extraArgs = new ArrayList<>(); + private final Project project; + + @Inject + public GradleDistroTestTask(Project project) { + super(project); + this.project = project; + } public void setTaskName(String taskName) { this.taskName = taskName; @@ -84,17 +94,15 @@ protected List<String> getLinuxScript() { } private List<String> getScript(boolean isWindows) { - String cacheDir = getProject().getBuildDir() + "/gradle-cache"; + String cacheDir = project.getBuildDir() + "/gradle-cache"; StringBuilder line = new StringBuilder(); line.append(isWindows ? "& .\\gradlew " : "./gradlew "); line.append(taskName); line.append(" --project-cache-dir "); - line.append( - isWindows ? 
VagrantMachine.convertWindowsPath(getProject(), cacheDir) : VagrantMachine.convertLinuxPath(getProject(), cacheDir) - ); + line.append(isWindows ? VagrantMachine.convertWindowsPath(project, cacheDir) : VagrantMachine.convertLinuxPath(project, cacheDir)); line.append(" -S"); line.append(" --parallel"); - line.append(" -D'org.gradle.logging.level'=" + getProject().getGradle().getStartParameter().getLogLevel()); + line.append(" -D'org.gradle.logging.level'=" + project.getGradle().getStartParameter().getLogLevel()); if (testClass != null) { line.append(" --tests="); line.append(testClass); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java index aec31d02b9bee..474c04eabbcaf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java @@ -35,9 +35,12 @@ import groovy.lang.Closure; import org.opensearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.tasks.CacheableTask; +import javax.inject.Inject; + /** * Sub typed version of {@link StandaloneRestIntegTestTask} that is used to differentiate between plain standalone * integ test tasks based on {@link StandaloneRestIntegTestTask} and @@ -45,11 +48,19 @@ */ @CacheableTask public abstract class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings { + private final Project project; + + @Inject + public RestIntegTestTask(Project project) { + super(project); + this.project = project; + } + @SuppressWarnings("rawtypes") @Override public Task configure(Closure closure) { final Task t = super.configure(closure); - applyConventionMapping(getProject(), getConventionMapping()); + applyConventionMapping(project, getConventionMapping()); return t; } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java index ce5210482c055..24c4a46abfe29 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java @@ -55,7 +55,7 @@ public void apply(Project project) { .getExtensions() .getByName(TestClustersPlugin.EXTENSION_NAME); OpenSearchCluster cluster = testClusters.maybeCreate(restIntegTestTask.getName()); - restIntegTestTask.useCluster(cluster); + restIntegTestTask.useCluster(project, cluster); restIntegTestTask.include("**/*IT.class"); restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString()); if (System.getProperty(TESTS_REST_CLUSTER) == null) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java index f7511a2ac7f1c..abd40d2e0665a 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java @@ -10,17 +10,27 @@ import groovy.lang.Closure; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.testing.Test; +import javax.inject.Inject; + @CacheableTask public abstract class TestTask extends Test implements TestSuiteConventionMappings { + private final Project project; + + @Inject + public TestTask(Project project) { + this.project = project; + } + 
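The same constructor-injection change repeats across these task classes; a minimal, self-contained sketch of the pattern follows (ExampleTask and its log output are illustrative, not part of this change set):

import org.gradle.api.DefaultTask;
import org.gradle.api.Project;
import org.gradle.api.tasks.TaskAction;

import javax.inject.Inject;

public abstract class ExampleTask extends DefaultTask {
    private final Project project;

    @Inject
    public ExampleTask(Project project) {
        // Capture the Project once at construction time instead of calling
        // Task.getProject() from task actions, which is incompatible with
        // Gradle's configuration cache at execution time.
        this.project = project;
    }

    @TaskAction
    public void run() {
        // The stored reference replaces getProject() when the task executes.
        getLogger().lifecycle("project path: {}", project.getPath());
    }
}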
@SuppressWarnings("rawtypes") @Override public Task configure(Closure closure) { final Task t = super.configure(closure); - applyConventionMapping(getProject(), getConventionMapping()); + applyConventionMapping(project, getConventionMapping()); return t; } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java index 485561a305291..4d6be4beaccf8 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java @@ -74,16 +74,20 @@ */ public class CopyRestApiTask extends DefaultTask { private static final String REST_API_PREFIX = "rest-api-spec/api"; - final ListProperty includeCore = getProject().getObjects().listProperty(String.class); + final ListProperty includeCore; String sourceSetName; boolean skipHasRestTestCheck; Configuration coreConfig; Configuration additionalConfig; + private final Project project; private final PatternFilterable corePatternSet; - public CopyRestApiTask() { - corePatternSet = getPatternSetFactory().create(); + @Inject + public CopyRestApiTask(Project project) { + this.project = project; + this.corePatternSet = getPatternSetFactory().create(); + this.includeCore = project.getObjects().listProperty(String.class); } @Inject @@ -133,8 +137,8 @@ public FileTree getInputDir() { } ConfigurableFileCollection fileCollection = additionalConfig == null - ? getProject().files(coreFileTree) - : getProject().files(coreFileTree, additionalConfig.getAsFileTree()); + ? project.files(coreFileTree) + : project.files(coreFileTree, additionalConfig.getAsFileTree()); // if project has rest tests or the includes are explicitly configured execute the task, else NO-SOURCE due to the null input return projectHasYamlRestTests || includeCore.get().isEmpty() == false ? fileCollection.getAsFileTree() : null; @@ -210,7 +214,7 @@ private boolean projectHasYamlRestTests() { .anyMatch(p -> p.getFileName().toString().endsWith("yml")); } } catch (IOException e) { - throw new IllegalStateException(String.format("Error determining if this project [%s] has rest tests.", getProject()), e); + throw new IllegalStateException(String.format("Error determining if this project [%s] has rest tests.", project), e); } return false; } @@ -240,7 +244,6 @@ private File getTestOutputResourceDir() { } private Optional getSourceSet() { - Project project = getProject(); return project.getExtensions().findByType(JavaPluginExtension.class) == null ? 
Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName())); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java index 0d5af7ca06b50..6f7c99889e3a2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java @@ -71,16 +71,20 @@ */ public class CopyRestTestsTask extends DefaultTask { private static final String REST_TEST_PREFIX = "rest-api-spec/test"; - final ListProperty includeCore = getProject().getObjects().listProperty(String.class); + final ListProperty includeCore; String sourceSetName; Configuration coreConfig; Configuration additionalConfig; + private final Project project; private final PatternFilterable corePatternSet; - public CopyRestTestsTask() { - corePatternSet = getPatternSetFactory().create(); + @Inject + public CopyRestTestsTask(Project project) { + this.project = project; + this.corePatternSet = getPatternSetFactory().create(); + this.includeCore = project.getObjects().listProperty(String.class); } @Inject @@ -123,8 +127,8 @@ public FileTree getInputDir() { } } ConfigurableFileCollection fileCollection = additionalConfig == null - ? getProject().files(coreFileTree) - : getProject().files(coreFileTree, additionalConfig.getAsFileTree()); + ? project.files(coreFileTree) + : project.files(coreFileTree, additionalConfig.getAsFileTree()); // copy tests only if explicitly requested return includeCore.get().isEmpty() == false || additionalConfig != null ? fileCollection.getAsFileTree() : null; @@ -178,7 +182,6 @@ void copy() { } private Optional getSourceSet() { - Project project = getProject(); return project.getExtensions().findByType(JavaPluginExtension.class) == null ? 
Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName())); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java index ddcbf77b0d5e6..5b883f8068825 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -36,6 +36,7 @@ import org.opensearch.gradle.FileSystemOperationsAware; import org.opensearch.gradle.test.Fixture; import org.opensearch.gradle.util.GradleUtils; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.provider.Provider; import org.gradle.api.services.internal.BuildServiceProvider; @@ -48,6 +49,8 @@ import org.gradle.internal.resources.ResourceLock; import org.gradle.internal.resources.SharedResource; +import javax.inject.Inject; + import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.util.ArrayList; @@ -67,7 +70,8 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl private Collection clusters = new HashSet<>(); private Closure beforeStart; - public StandaloneRestIntegTestTask() { + @Inject + public StandaloneRestIntegTestTask(Project project) { this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it uses a cluster shared by other tasks", @@ -77,7 +81,7 @@ public StandaloneRestIntegTestTask() { * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between * multiple tasks. */ - t -> getProject().getTasks() + t -> project.getTasks() .withType(StandaloneRestIntegTestTask.class) .stream() .filter(task -> task != this) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java index e5d264121b0aa..a4cf0a6748005 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java @@ -32,6 +32,7 @@ package org.opensearch.gradle.testclusters; import org.opensearch.gradle.Jdk; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.tasks.Nested; @@ -44,8 +45,13 @@ public interface TestClustersAware extends Task { @Nested Collection getClusters(); + @Deprecated(forRemoval = true) default void useCluster(OpenSearchCluster cluster) { - if (cluster.getPath().equals(getProject().getPath()) == false) { + useCluster(getProject(), cluster); + } + + default void useCluster(Project project, OpenSearchCluster cluster) { + if (cluster.getPath().equals(project.getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index f65e231cd2e50..c3b870e4ce5ad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -43,6 +43,7 @@ import org.opensearch.gradle.SystemPropertyCommandLineArgumentProvider; import 
org.opensearch.gradle.docker.DockerSupportPlugin; import org.opensearch.gradle.docker.DockerSupportService; +import org.opensearch.gradle.docker.DockerSupportService.DockerComposeV2Availability; import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.precommit.TestingConventionsTasks; import org.opensearch.gradle.util.GradleUtils; @@ -171,11 +172,8 @@ public void execute(Task task) { .findFirst(); composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker"); - if (dockerSupport.get().getDockerAvailability().isComposeV2Available) { - composeExtension.getUseDockerComposeV2().set(true); - } else if (dockerSupport.get().getDockerAvailability().isComposeAvailable) { - composeExtension.getUseDockerComposeV2().set(false); - } + composeExtension.getUseDockerComposeV2() + .set(dockerSupport.get().getDockerAvailability().dockerComposeAvailability instanceof DockerComposeV2Availability); tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions @@ -232,8 +230,7 @@ private void maybeSkipTask(Provider dockerSupport, TaskPro private void maybeSkipTask(Provider dockerSupport, Task task) { task.onlyIf(spec -> { - boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeV2Available - || dockerSupport.get().getDockerAvailability().isComposeAvailable; + boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isDockerComposeAvailable(); if (isComposeAvailable == false) { LOGGER.info("Task {} requires docker-compose but it is unavailable. Task will be skipped.", task.getPath()); } @@ -252,7 +249,7 @@ private void configureServiceInfoForTask( task.doFirst(new Action() { @Override public void execute(Task theTask) { - TestFixtureExtension extension = theTask.getProject().getExtensions().getByType(TestFixtureExtension.class); + TestFixtureExtension extension = fixtureProject.getExtensions().getByType(TestFixtureExtension.class); fixtureProject.getExtensions() .getByType(ComposeExtension.class) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java index ca1b95183505f..665f690b8b146 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java @@ -33,9 +33,12 @@ package org.opensearch.gradle.vagrant; import org.gradle.api.DefaultTask; +import org.gradle.api.Project; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -55,13 +58,16 @@ public abstract class VagrantShellTask extends DefaultTask { private final VagrantExtension extension; private final VagrantMachine service; private UnaryOperator progressHandler = UnaryOperator.identity(); + private final Project project; - public VagrantShellTask() { - extension = getProject().getExtensions().findByType(VagrantExtension.class); - if (extension == null) { + @Inject + public VagrantShellTask(Project project) { + this.project = project; + this.extension = project.getExtensions().findByType(VagrantExtension.class); + if (this.extension == null) { throw new IllegalStateException("opensearch.vagrant-base must be applied to create " + getClass().getName()); } - service = getProject().getExtensions().getByType(VagrantMachine.class); + 
this.service = project.getExtensions().getByType(VagrantMachine.class); } @Input @@ -81,14 +87,14 @@ public void setProgressHandler(UnaryOperator progressHandler) { @TaskAction public void runScript() { - String rootDir = getProject().getRootDir().toString(); + String rootDir = project.getRootDir().toString(); if (extension.isWindowsVM()) { service.execute(spec -> { spec.setCommand("winrm"); List script = new ArrayList<>(); script.add("try {"); - script.add("cd " + convertWindowsPath(getProject(), rootDir)); + script.add("cd " + convertWindowsPath(project, rootDir)); extension.getVmEnv().forEach((k, v) -> script.add("$Env:" + k + " = \"" + v + "\"")); script.addAll(getWindowsScript().stream().map(s -> " " + s).collect(Collectors.toList())); script.addAll( @@ -111,7 +117,7 @@ public void runScript() { List script = new ArrayList<>(); script.add("sudo bash -c '"); // start inline bash script script.add("pwd"); - script.add("cd " + convertLinuxPath(getProject(), rootDir)); + script.add("cd " + convertLinuxPath(project, rootDir)); extension.getVmEnv().forEach((k, v) -> script.add("export " + k + "=" + v)); script.addAll(getLinuxScript()); script.add("'"); // end inline bash script diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 3db2a6e7c2733..83bec727b1502 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.24.2" + implementation "org.apache.logging.log4j:log4j-core:2.24.3" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index 8e4f40c096851..feec78547edb6 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -33,9 +33,9 @@ group = 'org.opensearch.plugin' apply plugin: 'opensearch.opensearchplugin' opensearchplugin { - name 'client-benchmark-noop-api' - description 'Stubbed out OpenSearch actions that can be used for client-side benchmarking' - classname 'org.opensearch.plugin.noop.NoopPlugin' + name = 'client-benchmark-noop-api' + description = 'Stubbed out OpenSearch actions that can be used for client-side benchmarking' + classname = 'org.opensearch.plugin.noop.NoopPlugin' } // Not published so no need to assemble diff --git a/distribution/build.gradle b/distribution/build.gradle index a323dd15ed9cf..572000d64d5a8 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -150,7 +150,7 @@ void copyModule(TaskProvider copyTask, Project module) { dependsOn moduleConfig from({ zipTree(moduleConfig.singleFile) }) { - includeEmptyDirs false + includeEmptyDirs = false // these are handled separately in the log4j config tasks below exclude '*/config/log4j2.properties' diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index ad8678c608b54..fa753623ce204 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -177,7 +177,7 @@ tasks.named("preProcessFixture").configure { } doLast { // tests expect to have an empty repo - project.delete( + delete( "${buildDir}/repo" ) createAndSetWritable( @@ -272,8 +272,8 @@ subprojects { Project subProject -> } artifacts.add('default', file(tarFile)) { - type 'tar' - name artifactName + type = 'tar' + name = artifactName 
builtBy exportTaskName } diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 659b25129b23c..113ab8aced60b 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.10.0" + id "com.netflix.nebula.ospackage-base" version "11.10.1" } void addProcessFilesTask(String type, boolean jdk) { @@ -111,21 +111,21 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { OS.current().equals(OS.WINDOWS) == false } dependsOn "process'${jdk ? '' : 'NoJdk'}${type.capitalize()}Files" - packageName "opensearch" + packageName = "opensearch" if (type == 'deb') { if (architecture == 'x64') { - arch('amd64') + arch = 'amd64' } else { assert architecture == 'arm64' : architecture - arch('arm64') + arch = 'arm64' } } else { assert type == 'rpm' : type if (architecture == 'x64') { - arch('x86_64') + arch = 'x86_64' } else { assert architecture == 'arm64' : architecture - arch('aarch64') + arch = 'aarch64' } } // Follow opensearch's file naming convention @@ -224,8 +224,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } into('/etc') permissionGroup 'opensearch' - includeEmptyDirs true - createDirectoryEntry true + includeEmptyDirs = true + createDirectoryEntry = true include("opensearch") // empty dir, just to add directory entry include("opensearch/jvm.options.d") // empty dir, just to add directory entry } @@ -238,8 +238,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { unix 0660 } permissionGroup 'opensearch' - includeEmptyDirs true - createDirectoryEntry true + includeEmptyDirs = true + createDirectoryEntry = true fileType CONFIG | NOREPLACE } String envFile = expansionsForDistribution(type, jdk)['path.env'] @@ -298,8 +298,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { into(file.parent) { from "${packagingFiles}/${file.parent}" include file.name - includeEmptyDirs true - createDirectoryEntry true + includeEmptyDirs = true + createDirectoryEntry = true user u permissionGroup g dirPermissions { @@ -320,13 +320,13 @@ apply plugin: 'com.netflix.nebula.ospackage-base' // this is package indepdendent configuration ospackage { - maintainer 'OpenSearch Team ' - summary 'Distributed RESTful search engine built for the cloud' - packageDescription ''' + maintainer ='OpenSearch Team ' + summary = 'Distributed RESTful search engine built for the cloud' + packageDescription = ''' Reference documentation can be found at https://github.com/opensearch-project/OpenSearch '''.stripIndent().trim() - url 'https://github.com/opensearch-project/OpenSearch' + url = 'https://github.com/opensearch-project/OpenSearch' // signing setup if (project.hasProperty('signing.password') && BuildParams.isSnapshotBuild() == false) { @@ -340,10 +340,10 @@ ospackage { // version found on oldest supported distro, centos-6 requires('coreutils', '8.4', GREATER | EQUAL) - fileMode 0644 - dirMode 0755 - user 'root' - permissionGroup 'root' + fileMode = 0644 + dirMode = 0755 + user = 'root' + permissionGroup = 'root' into '/usr/share/opensearch' } @@ -357,7 +357,7 @@ Closure commonDebConfig(boolean jdk, String architecture) { customFields['License'] = 'ASL-2.0' archiveVersion = project.version.replace('-', '~') - packageGroup 'web' + packageGroup = 'web' // versions found on oldest supported distro, centos-6 requires('bash', '4.1', 
GREATER | EQUAL) @@ -394,24 +394,24 @@ Closure commonRpmConfig(boolean jdk, String architecture) { return { configure(commonPackageConfig('rpm', jdk, architecture)) - license 'ASL-2.0' + license = 'ASL-2.0' - packageGroup 'Application/Internet' + packageGroup = 'Application/Internet' requires '/bin/bash' obsoletes packageName, '7.0.0', Flags.LESS prefix '/usr' - packager 'OpenSearch' + packager = 'OpenSearch' archiveVersion = project.version.replace('-', '_') release = '1' - os 'LINUX' - distribution 'OpenSearch' - vendor 'OpenSearch' + os = 'LINUX' + distribution = 'OpenSearch' + vendor = 'OpenSearch' // TODO ospackage doesn't support icon but we used to have one // without this the rpm will have parent dirs of any files we copy in, eg /etc/opensearch - addParentDirs false + addParentDirs = false } } diff --git a/doc-tools/build.gradle b/doc-tools/build.gradle index e6ace21420dda..9639c7d7048d6 100644 --- a/doc-tools/build.gradle +++ b/doc-tools/build.gradle @@ -3,8 +3,8 @@ plugins { } base { - group 'org.opensearch' - version '1.0.0-SNAPSHOT' + group = 'org.opensearch' + version = '1.0.0-SNAPSHOT' } repositories { diff --git a/doc-tools/missing-doclet/build.gradle b/doc-tools/missing-doclet/build.gradle index 114ccc948951a..c3c951fbcaf47 100644 --- a/doc-tools/missing-doclet/build.gradle +++ b/doc-tools/missing-doclet/build.gradle @@ -2,8 +2,8 @@ plugins { id 'java-library' } -group 'org.opensearch' -version '1.0.0-SNAPSHOT' +group = 'org.opensearch' +version = '1.0.0-SNAPSHOT' tasks.withType(JavaCompile) { options.compilerArgs += ["--release", targetCompatibility.toString()] diff --git a/gradle/ide.gradle b/gradle/ide.gradle index ea353f8d92bdd..82b3911f1618b 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -16,7 +16,7 @@ import org.jetbrains.gradle.ext.JUnit buildscript { repositories { maven { - url "https://plugins.gradle.org/m2/" + url = "https://plugins.gradle.org/m2/" } } dependencies { diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index c9305ce9d1bc9..1f0aa003cd83c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,6 +1,6 @@ [versions] opensearch = "2.19.0" -lucene = "9.12.0" +lucene = "9.12.1" bundled_jdk_vendor = "adoptium" bundled_jdk = "21.0.5+11" @@ -27,13 +27,14 @@ google_http_client = "1.44.1" google_auth = "1.29.0" tdigest = "3.2" hdrhistogram = "2.2.2" -grpc = "1.68.0" +grpc = "1.68.2" # when updating the JNA version, also update the version in buildSrc/build.gradle jna = "5.13.0" netty = "4.1.115.Final" joda = "2.12.7" +roaringbitmap = "1.3.0" # project reactor reactor_netty = "1.1.23" @@ -75,9 +76,58 @@ jzlib = "1.1.3" resteasy = "6.2.4.Final" # opentelemetry dependencies -opentelemetry = "1.41.0" -opentelemetrysemconv = "1.27.0-alpha" +opentelemetry = "1.46.0" +opentelemetrysemconv = "1.29.0-alpha" # arrow dependencies arrow = "17.0.0" flatbuffers = "2.0.0" + +[libraries] +hdrhistogram = { group = "org.hdrhistogram", name = "HdrHistogram", version.ref = "hdrhistogram" } +jakartaannotation = { group = "jakarta.annotation", name = "jakarta.annotation-api", version.ref = "jakarta_annotation" } +jodatime = { group = "joda-time", name = "joda-time", version.ref = "joda" } +jna = { group = "net.java.dev.jna", name = "jna", version.ref = "jna" } +jtscore = { group = "org.locationtech.jts", name = "jts-core", version.ref = "jts" } +jzlib = { group = "com.jcraft", name = "jzlib", version.ref = "jzlib" } +log4japi = { group = "org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } +log4jjul = { group = 
"org.apache.logging.log4j", name = "log4j-jul", version.ref = "log4j" } +log4jcore = { group = "org.apache.logging.log4j", name = "log4j-core", version.ref = "log4j" } +lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } +lucene-analysis-common = { group = "org.apache.lucene", name = "lucene-analysis-common", version.ref = "lucene" } +lucene-backward-codecs = { group = "org.apache.lucene", name = "lucene-backward-codecs", version.ref = "lucene" } +lucene-grouping = { group = "org.apache.lucene", name = "lucene-grouping", version.ref = "lucene" } +lucene-highlighter = { group = "org.apache.lucene", name = "lucene-highlighter", version.ref = "lucene" } +lucene-join = { group = "org.apache.lucene", name = "lucene-join", version.ref = "lucene" } +lucene-memory = { group = "org.apache.lucene", name = "lucene-memory", version.ref = "lucene" } +lucene-misc = { group = "org.apache.lucene", name = "lucene-misc", version.ref = "lucene" } +lucene-queries = { group = "org.apache.lucene", name = "lucene-queries", version.ref = "lucene" } +lucene-queryparser = { group = "org.apache.lucene", name = "lucene-queryparser", version.ref = "lucene" } +lucene-sandbox = { group = "org.apache.lucene", name = "lucene-sandbox", version.ref = "lucene" } +lucene-spatial-extras = { group = "org.apache.lucene", name = "lucene-spatial-extras", version.ref = "lucene" } +lucene-spatial3d = { group = "org.apache.lucene", name = "lucene-spatial3d", version.ref = "lucene" } +lucene-suggest = { group = "org.apache.lucene", name = "lucene-suggest", version.ref = "lucene" } +protobuf = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protobuf" } +reactivestreams = { group = "io.projectreactor", name = "reactor-core", version.ref = "reactor" } +reactorcore = { group = "org.reactivestreams", name = "reactive-streams", version.ref = "reactivestreams" } +roaringbitmap = { group = "org.roaringbitmap", name = "RoaringBitmap", version.ref = "roaringbitmap" } +spatial4j = { group = "org.locationtech.spatial4j", name = "spatial4j", version.ref = "spatial4j" } +tdigest = { group = "com.tdunning", name = "t-digest", version.ref = "tdigest" } + +[bundles] +lucene = [ + "lucene-core", + "lucene-analysis-common", + "lucene-backward-codecs", + "lucene-grouping", + "lucene-highlighter", + "lucene-join", + "lucene-memory", + "lucene-misc", + "lucene-queries", + "lucene-queryparser", + "lucene-sandbox", + "lucene-spatial-extras", + "lucene-spatial3d", + "lucene-suggest" +] diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 6531bf924aaa9..79877ed820d52 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -64,8 +64,8 @@ allprojects { tasks.register('missingJavadoc', MissingJavadocTask) { - description "This task validates and generates Javadoc API documentation for the main source code." - group "documentation" + description = "This task validates and generates Javadoc API documentation for the main source code." 
+ group = "documentation" taskResources = resources dependsOn sourceSets.main.compileClasspath @@ -241,11 +241,18 @@ class MissingJavadocTask extends DefaultTask { @PathSensitive(PathSensitivity.RELATIVE) def taskResources + Project project + // See please https://docs.gradle.org/8.11/userguide/service_injection.html#execoperations interface InjectedExecOps { @Inject ExecOperations getExecOps() } + @Inject + MissingJavadocTask(Project project) { + this.project = project + } + /** Utility method to recursively collect all tasks with same name like this one that we depend on */ private Set findRenderTasksInDependencies() { Set found = [] @@ -364,7 +371,7 @@ class MissingJavadocTask extends DefaultTask { // force locale to be "en_US" (fix for: https://bugs.openjdk.java.net/browse/JDK-8222793) args += [ "-J-Duser.language=en", "-J-Duser.country=US" ] - ignoreExitValue true + ignoreExitValue = true } } diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index ec480eaeb61ef..8b3d2296213c2 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a +distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5 diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 60bf488833393..2bf2dbb803d9f 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -92,7 +92,7 @@ if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { } tasks.register('roundableSimdTest', Test) { - group 'verification' + group = 'verification' include '**/RoundableTests.class' systemProperty 'opensearch.experimental.feature.simd.rounding.enabled', 'forced' } diff --git a/libs/core/licenses/lucene-core-9.12.0.jar.sha1 b/libs/core/licenses/lucene-core-9.12.0.jar.sha1 deleted file mode 100644 index e55f896dedb63..0000000000000 --- a/libs/core/licenses/lucene-core-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fdb055d569bb20bfce9618fe2b01c29bab7f290c \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.12.1.jar.sha1 b/libs/core/licenses/lucene-core-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2521c91a81d64 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.12.1.jar.sha1 @@ -0,0 +1 @@ +91447c90c1180122142773b5baddaf8547124794 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 564c678ce3f0d..f7b76805c4b0c 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -100,6 +100,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_18 = new Version(1031899, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_19 = new Version(1031999, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_20 = new Version(1032099, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_21 = new Version(1032199, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new 
Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); @@ -142,8 +143,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_17_1 = new Version(2170199, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_17_2 = new Version(2170299, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_0); + public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version CURRENT = V_2_19_0; public static Version fromId(int id) { diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle index dd3aee61f7664..72d6c36af133e 100644 --- a/modules/aggs-matrix-stats/build.gradle +++ b/modules/aggs-matrix-stats/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.' - classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationPlugin' + description = 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.' + classname = 'org.opensearch.search.aggregations.matrix.MatrixAggregationPlugin' hasClientJar = true } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index be0acf7218c1e..17a2ff5eabf57 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Adds "built in" analyzers to OpenSearch.' - classname 'org.opensearch.analysis.common.CommonAnalysisPlugin' + description = 'Adds "built in" analyzers to OpenSearch.' 
+ classname = 'org.opensearch.analysis.common.CommonAnalysisPlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/build.gradle b/modules/build.gradle index 126bf0c8870ac..0c69a43af0509 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -35,7 +35,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) { opensearchplugin { // for local OpenSearch plugins, the name of the plugin is the same as the directory - name project.name + name = project.name } if (project.file('src/main/packaging').exists()) { diff --git a/modules/cache-common/build.gradle b/modules/cache-common/build.gradle index 98cdec83b9ad1..996c47b26b4d9 100644 --- a/modules/cache-common/build.gradle +++ b/modules/cache-common/build.gradle @@ -9,8 +9,8 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for caches which are optional and do not require additional security permission' - classname 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' + description = 'Module for caches which are optional and do not require additional security permission' + classname = 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' } test { diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index 7ab6f80b65ca2..dc135ce7a4e35 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Plugin for geospatial features in OpenSearch. Registering the geo_shape and aggregations on GeoShape and GeoPoint' - classname 'org.opensearch.geo.GeoModulePlugin' + description = 'Plugin for geospatial features in OpenSearch. Registering the geo_shape and aggregations on GeoShape and GeoPoint' + classname = 'org.opensearch.geo.GeoModulePlugin' } restResources { diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 34201069d7b7b..2d25e83233494 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' - classname 'org.opensearch.ingest.common.IngestCommonPlugin' + description = 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' + classname = 'org.opensearch.ingest.common.IngestCommonPlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 39f0995900059..da11feb077808 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -34,8 +34,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database' - classname 'org.opensearch.ingest.geoip.IngestGeoIpPlugin' + description = 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database' + classname = 'org.opensearch.ingest.geoip.IngestGeoIpPlugin' } dependencies { diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index a3752ad1c7f7e..12a252711163d 100644 --- a/modules/ingest-user-agent/build.gradle +++ 
b/modules/ingest-user-agent/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Ingest processor that extracts information from a user agent' - classname 'org.opensearch.ingest.useragent.IngestUserAgentPlugin' + description = 'Ingest processor that extracts information from a user agent' + classname = 'org.opensearch.ingest.useragent.IngestUserAgentPlugin' } restResources { diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 0070923dc4be5..c1f3c859034ee 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Lucene expressions integration for OpenSearch' - classname 'org.opensearch.script.expression.ExpressionPlugin' + description = 'Lucene expressions integration for OpenSearch' + classname = 'org.opensearch.script.expression.ExpressionPlugin' } dependencies { diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 deleted file mode 100644 index 476049a66cc08..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ba843374a0aab3dfe0b11cb28b251844d85bf5b \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..9e0a5c2d7df21 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 @@ -0,0 +1 @@ +667ee99f31c8e42eac70b0adcf8deb4232935430 \ No newline at end of file diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index cb4f496201087..e364660fd7c5b 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -32,8 +32,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Mustache scripting integration for OpenSearch' - classname 'org.opensearch.script.mustache.MustachePlugin' + description = 'Mustache scripting integration for OpenSearch' + classname = 'org.opensearch.script.mustache.MustachePlugin' hasClientJar = true // For the template apis and query } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 28e75a536106f..1147ca249f4dd 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -36,8 +36,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'An easy, safe and fast scripting language for OpenSearch' - classname 'org.opensearch.painless.PainlessPlugin' + description = 'An easy, safe and fast scripting language for OpenSearch' + classname = 'org.opensearch.painless.PainlessPlugin' } ext { diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java index 2217497ebfca0..f733dcafb117a 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java @@ -66,6 +66,7 @@ import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptEngine; 
import org.opensearch.script.ScriptService; +import org.opensearch.script.UpdateScript; import org.opensearch.search.aggregations.pipeline.MovingFunctionScript; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -109,6 +110,11 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, Extens ingest.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.opensearch.ingest.txt")); map.put(IngestScript.CONTEXT, ingest); + // Functions available to update scripts + List update = new ArrayList<>(Whitelist.BASE_WHITELISTS); + update.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.opensearch.update.txt")); + map.put(UpdateScript.CONTEXT, update); + // Functions available to derived fields List derived = new ArrayList<>(Whitelist.BASE_WHITELISTS); derived.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.opensearch.derived.txt")); diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt new file mode 100644 index 0000000000000..144614b3862b0 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt @@ -0,0 +1,14 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +# This file contains an allowlist for the update scripts + +class java.lang.String { + String org.opensearch.painless.api.Augmentation sha1() + String org.opensearch.painless.api.Augmentation sha256() +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml index cb118ed9d562f..e0f3068810ed8 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml @@ -123,3 +123,39 @@ - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "Iterable object is self-referencing itself" } + +# update script supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Update script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: test_1 + id: 1 + body: + foo: bar + + - do: + update: + index: test_1 + id: 1 + body: + script: + lang: painless + source: "ctx._source.foo_sha1 = ctx._source.foo.sha1();ctx._source.foo_sha256 = ctx._source.foo.sha256();" + + - match: { _index: test_1 } + - match: { _id: "1" } + - match: { _version: 2 } + + - do: + get: + index: test_1 + id: 1 + + - match: { _source.foo: bar } + - match: { _source.foo_sha1: "62cdb7020ff920e5aa642c3d4066950dd1f01f4d" } + - match: { _source.foo_sha256: "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9" } diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index 08758c7ab2bda..39e47e328ee23 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -31,8 +31,8 @@ apply plugin: 
'opensearch.yaml-rest-test' apply plugin: 'opensearch.java-rest-test' opensearchplugin { - description 'Adds advanced field mappers' - classname 'org.opensearch.index.mapper.MapperExtrasPlugin' + description = 'Adds advanced field mappers' + classname = 'org.opensearch.index.mapper.MapperExtrasPlugin' hasClientJar = true } diff --git a/modules/opensearch-dashboards/build.gradle b/modules/opensearch-dashboards/build.gradle index f76ca739faf81..072567f6bd684 100644 --- a/modules/opensearch-dashboards/build.gradle +++ b/modules/opensearch-dashboards/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.java-rest-test' opensearchplugin { - description 'Plugin exposing APIs for OpenSearch Dashboards system indices' - classname 'org.opensearch.dashboards.OpenSearchDashboardsPlugin' + description = 'Plugin exposing APIs for OpenSearch Dashboards system indices' + classname = 'org.opensearch.dashboards.OpenSearchDashboardsPlugin' } dependencies { diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 3c71a731e6a6a..6a522d596c4b8 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'This module adds the support parent-child queries and aggregations' - classname 'org.opensearch.join.ParentJoinPlugin' + description = 'This module adds the support parent-child queries and aggregations' + classname = 'org.opensearch.join.ParentJoinPlugin' hasClientJar = true } diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 1738de5a55748..55ccd14198389 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Percolator module adds capability to index queries and query these queries by specifying documents' - classname 'org.opensearch.percolator.PercolatorPlugin' + description = 'Percolator module adds capability to index queries and query these queries by specifying documents' + classname = 'org.opensearch.percolator.PercolatorPlugin' hasClientJar = true } diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 2b1d1e9abc4b4..59c658bacab97 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Rank Eval module adds APIs to evaluate ranking quality.' - classname 'org.opensearch.index.rankeval.RankEvalPlugin' + description = 'The Rank Eval module adds APIs to evaluate ranking quality.' + classname = 'org.opensearch.index.rankeval.RankEvalPlugin' hasClientJar = true } diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 37526a924da73..a50ec8ceb3361 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -40,8 +40,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' - classname 'org.opensearch.index.reindex.ReindexPlugin' + description = 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' 
+ classname = 'org.opensearch.index.reindex.ReindexPlugin' hasClientJar = true } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml index 9c38b13bb1ff0..5c218aa00ca4f 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml @@ -440,3 +440,41 @@ lang: painless source: syntax errors are fun! - match: {error.reason: 'compile error'} + +# script in reindex supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: twitter + id: 1 + body: { "user": "foobar" } + - do: + indices.refresh: {} + + - do: + reindex: + refresh: true + body: + source: + index: twitter + dest: + index: new_twitter + script: + lang: painless + source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256() + - match: {created: 1} + - match: {noops: 0} + + - do: + get: + index: new_twitter + id: 1 + + - match: { _source.user: foobar } + - match: { _source.user_sha1: "8843d7f92416211de9ebb963ff4ce28125932878" } + - match: { _source.user_sha256: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml index a8de49d812677..b52b1428e08bb 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml @@ -432,3 +432,38 @@ lang: painless source: syntax errors are fun! 
- match: {error.reason: 'compile error'} + +# script in update_by_query supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: twitter + id: 1 + body: { "user": "foobar" } + - do: + indices.refresh: {} + + - do: + update_by_query: + index: twitter + refresh: true + body: + script: + lang: painless + source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256() + - match: {updated: 1} + - match: {noops: 0} + + - do: + get: + index: twitter + id: 1 + + - match: { _source.user: foobar } + - match: { _source.user_sha1: "8843d7f92416211de9ebb963ff4ce28125932878" } + - match: { _source.user_sha256: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" } diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 702f0e9bb0f8b..5a64c262ec907 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -37,8 +37,8 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for URL repository' - classname 'org.opensearch.plugin.repository.url.URLRepositoryPlugin' + description = 'Module for URL repository' + classname = 'org.opensearch.plugin.repository.url.URLRepositoryPlugin' } restResources { @@ -56,7 +56,7 @@ task urlFixture(type: AntFixture) { doFirst { repositoryDir.mkdirs() } - env 'CLASSPATH', "${-> project.sourceSets.test.runtimeClasspath.asPath}" + env 'CLASSPATH', "${-> sourceSets.test.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.repositories.url.URLFixture', baseDir, "${repositoryDir.absolutePath}" } diff --git a/modules/search-pipeline-common/build.gradle b/modules/search-pipeline-common/build.gradle index 657392d884e97..4b6d579dc22e8 100644 --- a/modules/search-pipeline-common/build.gradle +++ b/modules/search-pipeline-common/build.gradle @@ -13,8 +13,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for search pipeline processors that do not require additional security permissions or have large dependencies and resources' - classname 'org.opensearch.search.pipeline.common.SearchPipelineCommonModulePlugin' + description = 'Module for search pipeline processors that do not require additional security permissions or have large dependencies and resources' + classname = 'org.opensearch.search.pipeline.common.SearchPipelineCommonModulePlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 26e094a9eeae1..2e2956852efe4 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -29,6 +29,6 @@ */ opensearchplugin { - description 'Integrates OpenSearch with systemd' - classname 'org.opensearch.systemd.SystemdPlugin' + description = 'Integrates OpenSearch with systemd' + classname = 'org.opensearch.systemd.SystemdPlugin' } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index f91c242a36181..57d2b9abb6362 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -49,8 +49,8 @@ apply plugin: 'opensearch.publish' * maybe figure out a way to run all tests from core with 
netty4/network? */ opensearchplugin { - description 'Netty 4 based transport implementation' - classname 'org.opensearch.transport.Netty4Plugin' + description = 'Netty 4 based transport implementation' + classname = 'org.opensearch.transport.Netty4Plugin' hasClientJar = true } diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index e5c084559f0a6..25e1587136d78 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -32,8 +32,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The ICU Analysis plugin integrates the Lucene ICU module into OpenSearch, adding ICU-related analysis components.' - classname 'org.opensearch.plugin.analysis.icu.AnalysisICUPlugin' + description = 'The ICU Analysis plugin integrates the Lucene ICU module into OpenSearch, adding ICU-related analysis components.' + classname = 'org.opensearch.plugin.analysis.icu.AnalysisICUPlugin' hasClientJar = true } diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 deleted file mode 100644 index 31398b27708a3..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9232b6a4882979118d3281b98dfdb6e0e1cb5ca \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..acb73de8b5dc9 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 @@ -0,0 +1 @@ +abaef4767ad64289e62abdd4606bf6ed2ddea0fd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle index 426b85f44bf55..5babcb2757f5e 100644 --- a/plugins/analysis-kuromoji/build.gradle +++ b/plugins/analysis-kuromoji/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin' + description = 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into opensearch.' 
+ classname = 'org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin' } dependencies { diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 deleted file mode 100644 index fa4c9d2d09d6e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3a6950ffc22e76a082e1b3cefb022b9f7870d29 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..916778086a6bd --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 @@ -0,0 +1 @@ +635c41143b896f402589d29e33695dcfabae9cc5 \ No newline at end of file diff --git a/plugins/analysis-nori/build.gradle b/plugins/analysis-nori/build.gradle index 3def7f9c6c60f..41a73fb3895ef 100644 --- a/plugins/analysis-nori/build.gradle +++ b/plugins/analysis-nori/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.nori.AnalysisNoriPlugin' + description = 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into opensearch.' + classname = 'org.opensearch.plugin.analysis.nori.AnalysisNoriPlugin' } dependencies { diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 deleted file mode 100644 index 576b924286d2d..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e71f85b72ed3939039ba8897b28b065dd11918b9 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..9c057370df5d1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e265410a6a4d9cd23b2e9c73321e6bd307bc1422 \ No newline at end of file diff --git a/plugins/analysis-phonenumber/build.gradle b/plugins/analysis-phonenumber/build.gradle index c9913b36f8508..1e19167582e19 100644 --- a/plugins/analysis-phonenumber/build.gradle +++ b/plugins/analysis-phonenumber/build.gradle @@ -12,8 +12,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Adds an analyzer for phone numbers to OpenSearch.' - classname 'org.opensearch.analysis.phone.PhoneNumberAnalysisPlugin' + description = 'Adds an analyzer for phone numbers to OpenSearch.' 
+ classname = 'org.opensearch.analysis.phone.PhoneNumberAnalysisPlugin' } dependencies { diff --git a/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java b/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java index 6b95594204eb4..e0541755a2b3e 100644 --- a/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java +++ b/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java @@ -98,7 +98,9 @@ private Set getTokens() throws IOException { // Rip off the "tel:" or "sip:" prefix if (input.indexOf("tel:") == 0 || input.indexOf("sip:") == 0) { - tokens.add(input.substring(0, 4)); + if (addNgrams) { + tokens.add(input.substring(0, 4)); + } input = input.substring(4); } @@ -128,14 +130,23 @@ private Set getTokens() throws IOException { countryCode = Optional.of(String.valueOf(numberProto.getCountryCode())); input = String.valueOf(numberProto.getNationalNumber()); - // Add Country code, extension, and the number as tokens - tokens.add(countryCode.get()); + // add full number as tokens tokens.add(countryCode.get() + input); - if (!Strings.isEmpty(numberProto.getExtension())) { - tokens.add(numberProto.getExtension()); + + if (addNgrams) { + // Consider the country code as an ngram - it makes no sense in the search analyzer as it'd match all values with the + // same country code + tokens.add(countryCode.get()); + + // Add extension without country code (not done for search analyzer as that might match numbers in other countries as + // well!) + if (!Strings.isEmpty(numberProto.getExtension())) { + tokens.add(numberProto.getExtension()); + } + // Add unformatted input (most likely the same as the extension now since the prefix has been removed) + tokens.add(input); } - tokens.add(input); } } catch (final NumberParseException | StringIndexOutOfBoundsException e) { // Libphone didn't like it, no biggie. We'll just ngram the number as it is. diff --git a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java index 332f6d21f47d6..503cee9cc710f 100644 --- a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java +++ b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java @@ -87,11 +87,7 @@ public void testEuropeDetailled() throws IOException { * Test for all tokens which are emitted by the "phone" analyzer. 
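The PhoneNumberTermTokenizer hunk above makes the country-code, extension, and raw national-number tokens conditional on addNgrams, so they are emitted only by the index analyzer; the search analyzer keeps just the full number, since a bare country code in a query would otherwise match every document sharing that prefix. A simplified, self-contained sketch of that branching — emitTokens and its parameters are hypothetical stand-ins for the plugin's internals, not its actual API:

```java
import java.util.LinkedHashSet;
import java.util.Set;

public class PhoneTokenSketch {
    // addNgrams == true mimics the index analyzer; false mimics the search analyzer.
    static Set<String> emitTokens(String countryCode, String nationalNumber, String extension, boolean addNgrams) {
        Set<String> tokens = new LinkedHashSet<>();
        tokens.add(countryCode + nationalNumber);   // the full number is always a token
        if (addNgrams) {
            tokens.add(countryCode);                // would match far too broadly at search time
            if (!extension.isEmpty()) {
                tokens.add(extension);              // could match numbers in other countries
            }
            tokens.add(nationalNumber);             // unformatted input, index side only
        }
        return tokens;
    }

    public static void main(String[] args) {
        System.out.println(emitTokens("41", "583161010", "", true));   // [41583161010, 41, 583161010]
        System.out.println(emitTokens("41", "583161010", "", false));  // [41583161010]
    }
}
```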
*/ public void testEuropeDetailledSearch() throws IOException { - assertTokensAreInAnyOrder( - phoneSearchAnalyzer, - "tel:+441344840400", - Arrays.asList("tel:+441344840400", "tel:", "441344840400", "44", "1344840400") - ); + assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+441344840400", Arrays.asList("tel:+441344840400", "441344840400")); } public void testEurope() throws IOException { @@ -163,7 +159,11 @@ public void testSipWithoutDomainPart() throws IOException { } public void testTelPrefix() throws IOException { - assertTokensInclude("tel:+1228", Arrays.asList("1228", "122", "228")); + assertTokensInclude(phoneAnalyzer, "tel:+1228", Arrays.asList("tel:+1228", "tel:", "1228", "122", "228")); + } + + public void testTelPrefixSearch() throws IOException { + assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+1228", Arrays.asList("tel:+1228", "1228")); } public void testNumberPrefix() throws IOException { @@ -189,21 +189,21 @@ public void testLocalNumberWithCH() throws IOException { } public void testSearchInternationalPrefixWithZZ() throws IOException { - assertTokensInclude(phoneSearchAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010")); } public void testSearchInternationalPrefixWithCH() throws IOException { - assertTokensInclude(phoneSearchCHAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010")); } public void testSearchNationalPrefixWithCH() throws IOException { // + is equivalent to 00 in Switzerland - assertTokensInclude(phoneSearchCHAnalyzer, "0041583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "0041583161010", Arrays.asList("0041583161010", "41583161010")); } public void testSearchLocalNumberWithCH() throws IOException { // when omitting the international prefix swiss numbers must start with '0' - assertTokensInclude(phoneSearchCHAnalyzer, "0583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "0583161010", Arrays.asList("0583161010", "41583161010")); } /** diff --git a/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml b/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml index 0bd7d2c371bfc..1c51bfa3c5347 100644 --- a/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml +++ b/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml @@ -32,9 +32,37 @@ index: test id: 1 body: { "phone": "+41 58 316 10 10", "phone-ch": "058 316 10 10" } + - do: + index: + index: test + id: 2 + body: { "phone": "+41 58 316 99 99", "phone-ch": "058 316 99 99" } + - do: + index: + index: test + id: 3 + # number not used in the examples below, just present to make sure that it's never matched + body: { "phone": "+41 12 345 67 89", "phone-ch": "012 345 67 89" } + - do: + index: + index: test + id: 4 + # germany has a different phone number length, but for this test we ignore it and pretend they're the same + body: { "phone": "+49 58 316 10 10", "phone-ch": "+49 58 316 10 10" } + - do: + index: + index: test + id: 5 + body: { "phone": "+1-888-280-4331", "phone-ch": "+1-888-280-4331" } + - do: + index: + index: 
test + id: 6 + body: { "phone": "tel:+441344840400", "phone-ch": "tel:+441344840400" } - do: indices.refresh: {} + # international format in document & search will always work - do: search: rest_total_hits_as_int: true @@ -45,6 +73,7 @@ "phone": "+41583161010" - match: { hits.total: 1 } + # correct national format & international format in search will always work - do: search: rest_total_hits_as_int: true @@ -54,3 +83,113 @@ match: "phone-ch": "+41583161010" - match: { hits.total: 1 } + + # national format without country specified won't work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "0583161010" + - match: { hits.total: 0 } + + # correct national format with country specified in document & search will always work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone-ch": "0583161010" + - match: { hits.total: 1 } + + # search-as-you-type style query + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "+4158316" + - match: { hits.total: 2 } + + # search-as-you-type style query + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone-ch": "058316" + - match: { hits.total: 2 } + + # international format in document & search will always work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "+1 888 280 4331" + - match: { hits.total: 1 } + + # international format in document & search will always work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone-ch": "+1 888 280 4331" + - match: { hits.total: 1 } + + # national format in search won't work if no country is specified + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "888 280 4331" + - match: { hits.total: 0 } + + # document & search have a tel: prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "tel:+441344840400" + - match: { hits.total: 1 } + + # only document has a tel: prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "+441344840400" + - match: { hits.total: 1 } + + # only search has a tel: prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "tel:+1 888 280 4331" + - match: { hits.total: 1 } diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle index ffa0466d43170..c0272b78c3db8 100644 --- a/plugins/analysis-phonetic/build.gradle +++ b/plugins/analysis-phonetic/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Phonetic Analysis plugin integrates phonetic token filter analysis with opensearch.' - classname 'org.opensearch.plugin.analysis.AnalysisPhoneticPlugin' + description = 'The Phonetic Analysis plugin integrates phonetic token filter analysis with opensearch.' 
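The search tests above turn on how a nationally formatted number is resolved: "0583161010" only normalizes to country code 41 when a default region such as CH is configured, which is why the region-less phone field returns zero hits while phone-ch matches. A small sketch using libphonenumber, the parser these analyzers build on (as the NumberParseException handling in the tokenizer suggests); treat it as illustrative rather than the plugin's exact code path:

```java
import com.google.i18n.phonenumbers.NumberParseException;
import com.google.i18n.phonenumbers.PhoneNumberUtil;
import com.google.i18n.phonenumbers.Phonenumber;

public class RegionSketch {
    public static void main(String[] args) throws NumberParseException {
        PhoneNumberUtil util = PhoneNumberUtil.getInstance();

        // With a CH default region, the national format resolves to +41 583 161 010.
        Phonenumber.PhoneNumber ch = util.parse("0583161010", "CH");
        System.out.println(ch.getCountryCode() + " " + ch.getNationalNumber()); // 41 583161010

        // Without a usable region ("ZZ"), the same input cannot be resolved and parsing
        // fails, so the analyzer falls back to plain ngrams and the match query finds nothing.
        try {
            util.parse("0583161010", "ZZ");
        } catch (NumberParseException e) {
            System.out.println("unparseable without a region: " + e.getErrorType()); // INVALID_COUNTRY_CODE
        }
    }
}
```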
+ classname = 'org.opensearch.plugin.analysis.AnalysisPhoneticPlugin' } dependencies { diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 deleted file mode 100644 index c8c146bbd0d25..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6baa3ae7ab20d6e644cf0bedb271c50a44c0e259 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..30db9fc8d69e2 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 @@ -0,0 +1 @@ +3787b8edc0cfad21998abc6aeb9d2cbf152b4b26 \ No newline at end of file diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle index d74d314ab0673..448a3a5e0a637 100644 --- a/plugins/analysis-smartcn/build.gradle +++ b/plugins/analysis-smartcn/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin' + description = 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into opensearch.' + classname = 'org.opensearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin' } dependencies { diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 deleted file mode 100644 index 54ea0b19f2a7b..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f183e1e8b1eaaa4dec444774a285bb8b66518522 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..96f8d70e6ee53 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e935f600bf153c46f5725198ca9352c32025f274 \ No newline at end of file diff --git a/plugins/analysis-stempel/build.gradle b/plugins/analysis-stempel/build.gradle index d713f80172c58..90523ae2d9d95 100644 --- a/plugins/analysis-stempel/build.gradle +++ b/plugins/analysis-stempel/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.stempel.AnalysisStempelPlugin' + description = 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into opensearch.' 
+ classname = 'org.opensearch.plugin.analysis.stempel.AnalysisStempelPlugin' } dependencies { diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 deleted file mode 100644 index 5442a40f5bba2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b81a609934e65d12ab9d2d84bc2ea6f56a360e57 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..d6d5f1c2609ff --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 @@ -0,0 +1 @@ +c4e1c94b1adbd1cb9dbdc0d3c2d2c33beabfc777 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index 386452fcf8aeb..77807198998b3 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into opensearch.' - classname 'org.opensearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' + description = 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into opensearch.' + classname = 'org.opensearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' } dependencies { diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 deleted file mode 100644 index 60fd4015cfde0..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bec069f286b45f20b743c81e84202369cd0467e7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..661f3062458e2 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d8e4716dab6d829e7b37a8b185cbd242650aeb9e \ No newline at end of file diff --git a/plugins/build.gradle b/plugins/build.gradle index 4e6de2c120d35..6c7fb749d08ac 100644 --- a/plugins/build.gradle +++ b/plugins/build.gradle @@ -39,9 +39,9 @@ configure(subprojects.findAll { it.parent.path == project.path }) { opensearchplugin { // for local ES plugins, the name of the plugin is the same as the directory - name project.name + name = project.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } } diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle index 5747624e2fb69..6390b045db8ea 100644 --- a/plugins/cache-ehcache/build.gradle +++ b/plugins/cache-ehcache/build.gradle @@ -14,8 +14,8 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Ehcache based cache implementation.' - classname 'org.opensearch.cache.EhcacheCachePlugin' + description = 'Ehcache based cache implementation.' 
+ classname = 'org.opensearch.cache.EhcacheCachePlugin' } versions << [ diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle index c4a8609b6df48..fa63a4a7153d3 100644 --- a/plugins/crypto-kms/build.gradle +++ b/plugins/crypto-kms/build.gradle @@ -16,8 +16,8 @@ apply plugin: 'opensearch.publish' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'AWS KMS plugin to provide crypto keys' - classname 'org.opensearch.crypto.kms.CryptoKmsPlugin' + description = 'AWS KMS plugin to provide crypto keys' + classname = 'org.opensearch.crypto.kms.CryptoKmsPlugin' } ext { diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 7f34cec94499c..2627b3061bdf2 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -35,8 +35,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' - classname 'org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' + description = 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' + classname = 'org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' } versions << [ diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 9c9f64f09b915..8d615e0bf8d9d 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -34,8 +34,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.' - classname 'org.opensearch.discovery.ec2.Ec2DiscoveryPlugin' + description = 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.' 
+ classname = 'org.opensearch.discovery.ec2.Ec2DiscoveryPlugin' } dependencies { diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index a844576d67ece..41c423c57ba36 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -76,8 +76,8 @@ yamlRestTest.enabled = false */ ['KeyStore', 'EnvVariables', 'SystemProperties', 'ContainerCredentials', 'InstanceProfile'].forEach { action -> AntFixture fixture = tasks.create(name: "ec2Fixture${action}", type: AntFixture) { - dependsOn project.sourceSets.yamlRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" + dependsOn sourceSets.yamlRestTest.runtimeClasspath + env 'CLASSPATH', "${-> sourceSets.yamlRestTest.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/yamlRestTest${action}-1/config/unicast_hosts.txt" } @@ -85,7 +85,7 @@ yamlRestTest.enabled = false tasks.create(name: "yamlRestTest${action}", type: RestIntegTestTask) { dependsOn fixture } - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSetContainer sourceSets = getExtensions().getByType(SourceSetContainer.class); SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) "yamlRestTest${action}" { setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 3214db2074198..a9338bfc43a2c 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -13,8 +13,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.' - classname 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' + description = 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.' + classname = 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' } dependencies { @@ -52,9 +52,10 @@ check { dependsOn 'qa:gce:check' } +def name = project.name test { // this is needed for insecure plugins, remove if possible! 
- systemProperty 'tests.artifact', project.name + systemProperty 'tests.artifact', name } thirdPartyAudit { diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 deleted file mode 100644 index bf45716c5b8ce..0000000000000 --- a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 841cd396a8bcf..562ec4e1db482 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -51,8 +51,8 @@ restResources { /** A task to start the GCEFixture which emulates a GCE service **/ task gceFixture(type: AntFixture) { - dependsOn project.sourceSets.yamlRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" + dependsOn sourceSets.yamlRestTest.runtimeClasspath + env 'CLASSPATH', "${-> sourceSets.yamlRestTest.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.cloud.gce.GCEFixture', baseDir, "${buildDir}/testclusters/yamlRestTest-1/config/unicast_hosts.txt" } diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index 5b35d887b3db1..c83e710283322 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'custom-settings' - description 'An example plugin showing how to register custom settings' - classname 'org.opensearch.example.customsettings.ExampleCustomSettingsPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-settings' + description = 'An example plugin showing how to register custom settings' + classname = 'org.opensearch.example.customsettings.ExampleCustomSettingsPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.all { diff --git a/plugins/examples/custom-significance-heuristic/build.gradle b/plugins/examples/custom-significance-heuristic/build.gradle index ab013657fed23..72efbaafad8e3 100644 --- a/plugins/examples/custom-significance-heuristic/build.gradle +++ b/plugins/examples/custom-significance-heuristic/build.gradle @@ -31,9 +31,9 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'custom-significance-heuristic' - description 'An example plugin showing how to write and register a custom significance heuristic' - classname 'org.opensearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-significance-heuristic' + description = 'An example plugin showing how to write and register a custom significance heuristic' + classname = 
'org.opensearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index d60523306b3c1..977cad7d1452e 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'custom-suggester' - description 'An example plugin showing how to write and register a custom suggester' - classname 'org.opensearch.example.customsuggester.CustomSuggesterPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-suggester' + description = 'An example plugin showing how to write and register a custom suggester' + classname = 'org.opensearch.example.customsuggester.CustomSuggesterPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.all { diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 70052c209ab61..53ea27f4a74a4 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -31,12 +31,12 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'painless-whitelist' - description 'An example allowlisting additional classes and methods in painless' - classname 'org.opensearch.example.painlesswhitelist.MyWhitelistPlugin' + name = 'painless-whitelist' + description = 'An example allowlisting additional classes and methods in painless' + classname = 'org.opensearch.example.painlesswhitelist.MyWhitelistPlugin' extendedPlugins = ['lang-painless'] - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index b33d79395d92b..ad450798514ea 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -31,9 +31,9 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'example-rescore' - description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' - classname 'org.opensearch.example.rescore.ExampleRescorePlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'example-rescore' + description = 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' + classname = 'org.opensearch.example.rescore.ExampleRescorePlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index b97d091af9d08..c3c25b4b0a841 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -35,11 +35,11 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 
'opensearch.java-rest-test' opensearchplugin { - name 'rest-handler' - description 'An example plugin showing how to register a REST handler' - classname 'org.opensearch.example.resthandler.ExampleRestHandlerPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'rest-handler' + description = 'An example plugin showing how to register a REST handler' + classname = 'org.opensearch.example.resthandler.ExampleRestHandlerPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } // No unit tests in this example @@ -47,7 +47,7 @@ test.enabled = false tasks.register("exampleFixture", org.opensearch.gradle.test.AntFixture) { dependsOn sourceSets.javaRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.javaRestTest.runtimeClasspath.asPath}" + env 'CLASSPATH', "${-> sourceSets.javaRestTest.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.example.resthandler.ExampleFixture', baseDir, 'TEST' } diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index e4ddd97abbe4c..1a880e80d2e49 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'script-expert-scoring' - description 'An example script engine to use low level Lucene internals for expert scoring' - classname 'org.opensearch.example.expertscript.ExpertScriptPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'script-expert-scoring' + description = 'An example script engine to use low level Lucene internals for expert scoring' + classname = 'org.opensearch.example.expertscript.ExpertScriptPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } test.enabled = false diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index 222443efcb214..2ea3e8e6b1e50 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -9,11 +9,11 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Plugin for identity features in OpenSearch.' - classname 'org.opensearch.identity.shiro.ShiroIdentityPlugin' - name project.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + description = 'Plugin for identity features in OpenSearch.' 
+ classname = 'org.opensearch.identity.shiro.ShiroIdentityPlugin' + name = project.name + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 4f30ea9ea7e22..e0ad602266602 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -33,8 +33,8 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Ingest processor that uses Apache Tika to extract contents' - classname 'org.opensearch.ingest.attachment.IngestAttachmentPlugin' + description = 'Ingest processor that uses Apache Tika to extract contents' + classname = 'org.opensearch.ingest.attachment.IngestAttachmentPlugin' } versions << [ @@ -89,7 +89,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.2.2' + api 'org.apache.xmlbeans:xmlbeans:5.3.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 deleted file mode 100644 index 613c1028dbd6d..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -586ffe10ae9864e19e85c24bd060790a70586f72 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 new file mode 100644 index 0000000000000..4dbb0149da890 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 @@ -0,0 +1 @@ +f93c3ba820d7240b7fec4ec5bc35e7223cc6fc1f \ No newline at end of file diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index 5ff3bbe37810b..c7bc5b795ed71 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' - classname 'org.opensearch.plugin.mapper.AnnotatedTextPlugin' + description = 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' + classname = 'org.opensearch.plugin.mapper.AnnotatedTextPlugin' } restResources { diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 67006f29b7565..42e27d7b3908a 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' - classname 'org.opensearch.plugin.mapper.MapperMurmur3Plugin' + description = 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' 
+ classname = 'org.opensearch.plugin.mapper.MapperMurmur3Plugin' } restResources { diff --git a/plugins/mapper-size/build.gradle b/plugins/mapper-size/build.gradle index fb4f7c4e00c4f..8c6caaf09e01a 100644 --- a/plugins/mapper-size/build.gradle +++ b/plugins/mapper-size/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Mapper Size plugin allows document to record their uncompressed size at index time.' - classname 'org.opensearch.plugin.mapper.MapperSizePlugin' + description = 'The Mapper Size plugin allows document to record their uncompressed size at index time.' + classname = 'org.opensearch.plugin.mapper.MapperSizePlugin' } restResources { diff --git a/plugins/query-insights/build.gradle b/plugins/query-insights/build.gradle index eabbd395bd3bd..317b6e9949608 100644 --- a/plugins/query-insights/build.gradle +++ b/plugins/query-insights/build.gradle @@ -10,8 +10,8 @@ */ opensearchplugin { - description 'OpenSearch Query Insights Plugin.' - classname 'org.opensearch.plugin.insights.QueryInsightsPlugin' + description = 'OpenSearch Query Insights Plugin.' + classname = 'org.opensearch.plugin.insights.QueryInsightsPlugin' } dependencies { diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 0fa2648bf1c02..c9297bd592437 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -39,16 +39,16 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Azure Repository plugin adds support for Azure storage repositories.' - classname 'org.opensearch.repositories.azure.AzureRepositoryPlugin' + description = 'The Azure Repository plugin adds support for Azure storage repositories.' 
+ classname = 'org.opensearch.repositories.azure.AzureRepositoryPlugin' } dependencies { - api 'com.azure:azure-core:1.51.0' + api 'com.azure:azure-core:1.54.1' api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' - api 'com.azure:azure-storage-common:12.27.1' - api 'com.azure:azure-core-http-netty:1.15.5' + api 'com.azure:azure-storage-common:12.28.0' + api 'com.azure:azure-core-http-netty:1.15.7' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -61,8 +61,8 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.17.2' - api 'com.nimbusds:oauth2-oidc-sdk:11.19.1' + api 'com.microsoft.azure:msal4j:1.18.0' + api 'com.nimbusds:oauth2-oidc-sdk:11.21' api 'com.nimbusds:nimbus-jose-jwt:9.41.1' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' @@ -108,7 +108,6 @@ thirdPartyAudit { // Optional and not enabled by Elasticsearch 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', 'com.google.common.util.concurrent.internal.InternalFutures', - 'com.azure.core.credential.ProofOfPossessionOptions', 'com.azure.storage.internal.avro.implementation.AvroObject', 'com.azure.storage.internal.avro.implementation.AvroReader', 'com.azure.storage.internal.avro.implementation.AvroReaderFactory', diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 deleted file mode 100644 index 7200f59af2f9a..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ff5d0aedf75ca45ec0ace24673f790d2f7a57096 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 new file mode 100644 index 0000000000000..9246d0dd8443a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 @@ -0,0 +1 @@ +9ae0cc4a8ff02a0146510ec9e1c06ab48950a66b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 deleted file mode 100644 index 2f5239cc26148..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -44d99705d3759e2ad7ee8110f811d4ed304a6a7c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 new file mode 100644 index 0000000000000..d72f835c69903 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 @@ -0,0 +1 @@ +a83247eeeb7f63f891e725228d54c3c24132c66a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 deleted file mode 100644 index d7602da1418d1..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.27.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c477c5d8c0f2076da1c5345c1097be6a319fe7c4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 
b/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 new file mode 100644 index 0000000000000..ed932cd0a07e9 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.28.0.jar.sha1 @@ -0,0 +1 @@ +3c5b7de96c68947ab74cc7925b27ca2b9f6b91d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 deleted file mode 100644 index b5219ee17e9fa..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6211e3d71d0388929babaa0ff0951b30d001852 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 new file mode 100644 index 0000000000000..292259e9d862d --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 @@ -0,0 +1 @@ +a47e4e9257a5d9cdb8282c331278492968e06250 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 deleted file mode 100644 index 7d83b0e8ca639..0000000000000 --- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58db85a807a56ae76baffa519772271ad5808195 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 new file mode 100644 index 0000000000000..9736182141a0a --- /dev/null +++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 @@ -0,0 +1 @@ +97bec173d2a199fdd7f5c1f3a61f7ccc2e992fc1 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index a5d549a178366..6f8ff4d85d368 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -89,6 +89,7 @@ import fixture.azure.AzureHttpHandler; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static java.nio.charset.StandardCharsets.UTF_8; import static org.opensearch.repositories.azure.AzureRepository.Repository.CONTAINER_SETTING; @@ -143,6 +144,7 @@ public void tearDown() throws Exception { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index 3356e5174592a..0433a13baec2c 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -49,6 +49,7 @@ import java.util.List; import reactor.core.scheduler.Schedulers; +import reactor.netty.http.HttpResources; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -57,6 +58,7 @@ public class AzureRepositorySettingsTests extends OpenSearchTestCase { @AfterClass public 
static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 9cff5bc2c30f1..324a20c9030c6 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -43,7 +43,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.test.OpenSearchTestCase; -import org.junit.After; import org.junit.AfterClass; import java.io.IOException; @@ -71,19 +70,10 @@ public class AzureStorageServiceTests extends OpenSearchTestCase { @AfterClass public static void shutdownSchedulers() { + HttpResources.disposeLoopsAndConnections(); Schedulers.shutdownNow(); } - @After - public void tearDown() throws Exception { - try { - // Properly shut down resources - HttpResources.disposeLoopsAndConnectionsLater().block(); - } finally { - super.tearDown(); - } - } - public void testReadSecuredSettings() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 0a87c3e4ab4a7..37e51eb593972 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -43,8 +43,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The GCS repository plugin adds Google Cloud Storage support for repositories.' - classname 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin' + description = 'The GCS repository plugin adds Google Cloud Storage support for repositories.' + classname = 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin' } dependencies { diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 deleted file mode 100644 index bf45716c5b8ce..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 29d25b5c55eeb..c2685a525c8ba 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -43,8 +43,8 @@ apply plugin: 'opensearch.rest-resources' apply plugin: 'opensearch.rest-test' opensearchplugin { - description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' - classname 'org.opensearch.repositories.hdfs.HdfsPlugin' + description = 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' 
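The three Azure repository test classes above converge on a single teardown order: dispose reactor-netty's shared event loops and connection pools once per class, then stop the Reactor schedulers, replacing the removed per-test disposeLoopsAndConnectionsLater().block(). A minimal JUnit 4 sketch of that ordering, assuming reactor-core and reactor-netty are on the test classpath:

```java
import org.junit.AfterClass;
import reactor.core.scheduler.Schedulers;
import reactor.netty.http.HttpResources;

public class ReactorTeardownSketch {
    @AfterClass
    public static void shutdownSchedulers() {
        // Release reactor-netty's global event loops and pooled connections first...
        HttpResources.disposeLoopsAndConnections();
        // ...then stop the shared reactor-core schedulers so no worker keeps the JVM alive.
        Schedulers.shutdownNow();
    }
}
```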
+ classname = 'org.opensearch.repositories.hdfs.HdfsPlugin' } versions << [ @@ -76,8 +76,8 @@ dependencies { api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" - api 'org.apache.commons:commons-lang3:3.15.0' - implementation 'com.google.re2j:re2j:1.6' + api 'org.apache.commons:commons-lang3:3.17.0' + implementation 'com.google.re2j:re2j:1.8' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" @@ -133,11 +133,11 @@ project(':test:fixtures:krb5kdc-fixture').tasks.preProcessFixture { // Create HDFS File System Testing Fixtures for HA/Secure combinations for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) { - def tsk = project.tasks.register(fixtureName, org.opensearch.gradle.test.AntFixture) { - dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture + def tsk = tasks.register(fixtureName, org.opensearch.gradle.test.AntFixture) { + dependsOn configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture executable = "${BuildParams.runtimeJavaHome}/bin/java" - env 'CLASSPATH', "${-> project.configurations.hdfsFixture.asPath}" - maxWaitInSeconds 60 + env 'CLASSPATH', "${-> configurations.hdfsFixture.asPath}" + maxWaitInSeconds = 60 onlyIf { BuildParams.inFipsJvm == false } waitCondition = { fixture, ant -> // the hdfs.MiniHDFS fixture writes the ports file when @@ -187,7 +187,7 @@ Set disabledIntegTestTaskNames = [] for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) { task "${integTestTaskName}"(type: RestIntegTestTask) { description = "Runs rest tests against an opensearch cluster with HDFS." 
- dependsOn(project.bundlePlugin) + dependsOn(bundlePlugin) if (disabledIntegTestTaskNames.contains(integTestTaskName)) { enabled = false; diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 deleted file mode 100644 index 4b1179c935946..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21581109b4be710ea4b195d5760392ec284f9f11 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.17.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.17.0.jar.sha1 new file mode 100644 index 0000000000000..073922fda1dbe --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.17.0.jar.sha1 @@ -0,0 +1 @@ +b17d2136f0460dcc0d2016ceefca8723bdf4ee70 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/re2j-1.6.jar.sha1 b/plugins/repository-hdfs/licenses/re2j-1.6.jar.sha1 deleted file mode 100644 index 854bd3a225b92..0000000000000 --- a/plugins/repository-hdfs/licenses/re2j-1.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a13e879fd7971738d06020fefeb108cc14e14169 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 b/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 new file mode 100644 index 0000000000000..8887078965f56 --- /dev/null +++ b/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 @@ -0,0 +1 @@ +12c25e923e9e4fb1575a7640a2698745c6f19a94 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 398611a016ed2..6e84edddcc252 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -41,8 +41,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The S3 repository plugin adds S3 repositories' - classname 'org.opensearch.repositories.s3.S3RepositoryPlugin' + description = 'The S3 repository plugin adds S3 repositories' + classname = 'org.opensearch.repositories.s3.S3RepositoryPlugin' } dependencies { diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 909c64ce25372..c4b47f3cc899f 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -65,6 +65,7 @@ import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; +import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotsService; @@ -166,7 +167,6 @@ protected Settings nodeSettings(int nodeOrdinal) { // Disable request throttling because some random values in tests might generate too many failures for the S3 client .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false) .put(S3ClientSettings.PROXY_TYPE_SETTING.getConcreteSettingForNamespace("test").getKey(), ProxySettings.ProxyType.DIRECT) - 
.put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) .put(super.nodeSettings(nodeOrdinal)) .setSecureSettings(secureSettings); @@ -316,22 +316,27 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - GenericStatsMetricPublisher genericStatsMetricPublisher = new GenericStatsMetricPublisher(10000L, 10, 10000L, 10); - + AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( + S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), + normalExecutorBuilder.getStreamReader(), + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader(), + transferSemaphoresHolder + ); return new S3Repository( metadata, registry, service, clusterService, recoverySettings, - null, - null, - null, - null, - null, - false, - null, - null, + asyncUploadUtils, + urgentExecutorBuilder, + priorityExecutorBuilder, + normalExecutorBuilder, + s3AsyncService, + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()), + normalPrioritySizeBasedBlockingQ, + lowPrioritySizeBasedBlockingQ, genericStatsMetricPublisher ) { diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index f0e40db965646..7db9a0d3ba790 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -55,14 +55,6 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { - @Override - protected Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) - .build(); - } - @Override @Before @SuppressForbidden(reason = "Need to set system property here for AWS SDK v2") diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 8bbef168de89c..7397c3132c17c 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -25,7 +25,6 @@ import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; @@ -120,6 +119,7 @@ public AmazonAsyncS3Reference client( if (existing != null && existing.tryIncRef()) { return existing; } + final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); @@ -235,17 +235,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { + RetryPolicy retryPolicy = 
SocketAccess.doPrivileged( + () -> RetryPolicy.builder() + .numRetries(clientSettings.maxRetries) + .throttlingBackoffStrategy( + clientSettings.throttleRetries ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) : BackoffStrategy.none() + ) + .build() + ); + return ClientOverrideConfiguration.builder() - .retryPolicy( - RetryPolicy.builder() - .numRetries(clientSettings.maxRetries) - .throttlingBackoffStrategy( - clientSettings.throttleRetries - ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) - : BackoffStrategy.none() - ) - .build() - ) + .retryPolicy(retryPolicy) .apiCallAttemptTimeout(Duration.ofMillis(clientSettings.requestTimeoutMillis)) .build(); } @@ -346,12 +346,7 @@ static AwsCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c // valid paths. @SuppressForbidden(reason = "Need to provide this override to v2 SDK so that path does not default to home path") private static void setDefaultAwsProfilePath() { - if (ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.getStringValue().isEmpty()) { - System.setProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), System.getProperty("opensearch.path.conf")); - } - if (ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue().isEmpty()) { - System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), System.getProperty("opensearch.path.conf")); - } + S3Service.setDefaultAwsProfilePath(); } private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { @@ -443,5 +438,6 @@ public AwsCredentials resolveCredentials() { @Override public void close() { releaseCachedClients(); + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 1a402e8431e25..8690a5c91680a 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -43,9 +43,6 @@ import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.Delete; -import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -55,9 +52,7 @@ import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.s3.model.ObjectAttributes; -import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.S3Error; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; @@ -68,7 +63,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; +import 
org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; @@ -101,11 +96,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; @@ -381,125 +373,17 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS } @Override - public DeleteResult delete() throws IOException { - final AtomicLong deletedBlobs = new AtomicLong(); - final AtomicLong deletedBytes = new AtomicLong(); - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - ListObjectsV2Iterable listObjectsIterable = SocketAccess.doPrivileged( - () -> clientReference.get() - .listObjectsV2Paginator( - ListObjectsV2Request.builder() - .bucket(blobStore.bucket()) - .prefix(keyPath) - .overrideConfiguration( - o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher) - ) - .build() - ) - ); - - Iterator listObjectsResponseIterator = listObjectsIterable.iterator(); - while (listObjectsResponseIterator.hasNext()) { - ListObjectsV2Response listObjectsResponse = SocketAccess.doPrivileged(listObjectsResponseIterator::next); - List blobsToDelete = listObjectsResponse.contents().stream().map(s3Object -> { - deletedBlobs.incrementAndGet(); - deletedBytes.addAndGet(s3Object.size()); - - return s3Object.key(); - }).collect(Collectors.toList()); - - if (!listObjectsResponseIterator.hasNext()) { - blobsToDelete.add(keyPath); - } - - doDeleteBlobs(blobsToDelete, false); - } - } catch (SdkException e) { - throw new IOException("Exception when deleting blob container [" + keyPath + "]", e); - } - - return new DeleteResult(deletedBlobs.get(), deletedBytes.get()); + public DeleteResult delete() { + PlainActionFuture future = new PlainActionFuture<>(); + deleteAsync(future); + return future.actionGet(); } @Override - public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { - doDeleteBlobs(blobNames, true); - } - - private void doDeleteBlobs(List blobNames, boolean relative) throws IOException { - if (blobNames.isEmpty()) { - return; - } - final Set outstanding; - if (relative) { - outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet()); - } else { - outstanding = new HashSet<>(blobNames); - } - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes - final List deleteRequests = new ArrayList<>(); - final List partition = new ArrayList<>(); - for (String key : outstanding) { - partition.add(key); - if (partition.size() == blobStore.getBulkDeletesSize()) { - deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); - partition.clear(); - } - } - if (partition.isEmpty() == false) { - deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); - } - SocketAccess.doPrivilegedVoid(() -> { - SdkException aex = null; - for (DeleteObjectsRequest deleteRequest : deleteRequests) { - List keysInRequest = deleteRequest.delete() - .objects() - .stream() - .map(ObjectIdentifier::key) - .collect(Collectors.toList()); - try { - DeleteObjectsResponse deleteObjectsResponse = clientReference.get().deleteObjects(deleteRequest); - 
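(Aside: the rewritten delete() above keeps BlobContainer's blocking contract by handing the async path a PlainActionFuture and blocking on it. A minimal self-contained sketch of that bridge, assuming current package names for PlainActionFuture and ActionListener, with a hypothetical deleteAsync and a Long count standing in for DeleteResult:)

import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.core.action.ActionListener;

final class AsyncToSyncBridge {
    // Hypothetical listener-based operation standing in for S3BlobContainer.deleteAsync().
    static void deleteAsync(ActionListener<Long> listener) {
        listener.onResponse(42L); // pretend 42 blobs were deleted
    }

    // PlainActionFuture implements ActionListener, so it can be passed straight to
    // the async path; actionGet() blocks and rethrows any exception from onFailure.
    static long deleteBlocking() {
        PlainActionFuture<Long> future = new PlainActionFuture<>();
        deleteAsync(future);
        return future.actionGet();
    }
}

The same shape backs the new deleteBlobsIgnoringIfNotExists below.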
outstanding.removeAll(keysInRequest); - outstanding.addAll(deleteObjectsResponse.errors().stream().map(S3Error::key).collect(Collectors.toSet())); - if (!deleteObjectsResponse.errors().isEmpty()) { - logger.warn( - () -> new ParameterizedMessage( - "Failed to delete some blobs {}", - deleteObjectsResponse.errors() - .stream() - .map(s3Error -> "[" + s3Error.key() + "][" + s3Error.code() + "][" + s3Error.message() + "]") - .collect(Collectors.toList()) - ) - ); - } - } catch (SdkException e) { - // The AWS client threw any unexpected exception and did not execute the request at all so we do not - // remove any keys from the outstanding deletes set. - aex = ExceptionsHelper.useOrSuppress(aex, e); - } - } - if (aex != null) { - throw aex; - } - }); - } catch (Exception e) { - throw new IOException("Failed to delete blobs [" + outstanding + "]", e); - } - assert outstanding.isEmpty(); - } - - private DeleteObjectsRequest bulkDelete(String bucket, List blobs) { - return DeleteObjectsRequest.builder() - .bucket(bucket) - .delete( - Delete.builder() - .objects(blobs.stream().map(blob -> ObjectIdentifier.builder().key(blob).build()).collect(Collectors.toList())) - .quiet(true) - .build() - ) - .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) - .build(); + public void deleteBlobsIgnoringIfNotExists(List blobNames) { + PlainActionFuture future = new PlainActionFuture<>(); + deleteBlobsAsyncIgnoringIfNotExists(blobNames, future); + future.actionGet(); } @Override @@ -886,7 +770,11 @@ public void deleteAsync(ActionListener completionListener) { try (AmazonAsyncS3Reference asyncClientReference = blobStore.asyncClientReference()) { S3AsyncClient s3AsyncClient = asyncClientReference.get().client(); - ListObjectsV2Request listRequest = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath).build(); + ListObjectsV2Request listRequest = ListObjectsV2Request.builder() + .bucket(blobStore.bucket()) + .prefix(keyPath) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher)) + .build(); ListObjectsV2Publisher listPublisher = s3AsyncClient.listObjectsV2Paginator(listRequest); AtomicLong deletedBlobs = new AtomicLong(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 1048ec784ec4e..72a812339e387 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -93,19 +93,19 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private static final String NORMAL_TRANSFER_QUEUE_CONSUMER = "normal_transfer_queue_consumer"; protected final S3Service service; - private final S3AsyncService s3AsyncService; + protected final S3AsyncService s3AsyncService; private final Path configPath; - private AsyncExecutorContainer urgentExecutorBuilder; - private AsyncExecutorContainer priorityExecutorBuilder; - private AsyncExecutorContainer normalExecutorBuilder; + protected AsyncExecutorContainer urgentExecutorBuilder; + protected AsyncExecutorContainer priorityExecutorBuilder; + protected AsyncExecutorContainer normalExecutorBuilder; private ExecutorService lowTransferQConsumerService; private ExecutorService normalTransferQConsumerService; - private SizeBasedBlockingQ 
normalPrioritySizeBasedBlockingQ; - private SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ; - private TransferSemaphoresHolder transferSemaphoresHolder; - private GenericStatsMetricPublisher genericStatsMetricPublisher; + protected SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ; + protected SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ; + protected TransferSemaphoresHolder transferSemaphoresHolder; + protected GenericStatsMetricPublisher genericStatsMetricPublisher; public S3RepositoryPlugin(final Settings settings, final Path configPath) { this(settings, configPath, new S3Service(configPath), new S3AsyncService(configPath)); @@ -387,5 +387,8 @@ public void reload(Settings settings) { public void close() throws IOException { service.close(); s3AsyncService.close(); + urgentExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + priorityExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + normalExecutorBuilder.getAsyncTransferEventLoopGroup().close(); } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 2cb11541d924f..53371cd1529ce 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -48,6 +48,7 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.DeletedObject; import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts; import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; @@ -92,7 +93,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -102,6 +102,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -286,9 +287,8 @@ public int numberOfPagesFetched() { } } - public void testDelete() throws IOException { + public void testDelete() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); int bulkDeleteSize = 5; @@ -297,147 +297,314 @@ public void testDelete() throws IOException { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + 
null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); final int totalPageCount = 3; final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); - final List keysDeleted = new ArrayList<>(); - AtomicInteger deleteCount = new AtomicInteger(); + List responses = new ArrayList<>(); + List allObjects = new ArrayList<>(); + long totalSize = 0; + + for (int i = 0; i < totalPageCount; i++) { + List pageObjects = new ArrayList<>(); + for (int j = 0; j < s3ObjectsPerPage; j++) { + pageObjects.add(S3Object.builder().key(randomAlphaOfLength(10)).size(s3ObjectSize).build()); + totalSize += s3ObjectSize; + } + allObjects.addAll(pageObjects); + responses.add(ListObjectsV2Response.builder().contents(pageObjects).build()); + } + + AtomicInteger counter = new AtomicInteger(); doAnswer(invocation -> { - DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); - deleteCount.getAndIncrement(); - logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size()); - keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList())); - return DeleteObjectsResponse.builder().build(); - }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + int currentCounter = counter.getAndIncrement(); + if (currentCounter < responses.size()) { + subscriber.onNext(responses.get(currentCounter)); + } + if (currentCounter == responses.size() - 1) { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) + ); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - DeleteResult deleteResult = blobContainer.delete(); - assertEquals(s3ObjectSize * s3ObjectsPerPage * totalPageCount, deleteResult.bytesDeleted()); - assertEquals(s3ObjectsPerPage * totalPageCount, deleteResult.blobsDeleted()); - // keysDeleted will have blobPath also - assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); - assertTrue(keysDeleted.contains(blobPath.buildAsString())); - // keysDeleted will have blobPath also - assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get()); - keysDeleted.remove(blobPath.buildAsString()); - assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference resultRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + 
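// (note) Exactly one of the two callbacks below fires: onResponse with the
// aggregated DeleteResult on success, or onFailure with the terminal exception.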
public void onResponse(DeleteResult deleteResult) { + resultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Unexpected failure: " + e.getMessage()); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + DeleteResult result = resultRef.get(); + + assertEquals(totalSize, result.bytesDeleted()); + assertEquals(allObjects.size(), result.blobsDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + int expectedDeleteCalls = (int) Math.ceil((double) allObjects.size() / bulkDeleteSize); + verify(s3AsyncClient, times(expectedDeleteCalls)).deleteObjects(any(DeleteObjectsRequest.class)); } - public void testDeleteItemLevelErrorsDuringDelete() { + public void testDeleteItemLevelErrorsDuringDelete() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + int bulkDeleteSize = 3; // Small size to force multiple delete requests + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + final int totalObjects = 10; + List s3Objects = new ArrayList<>(); + for (int i = 0; i < totalObjects; i++) { + s3Objects.add(S3Object.builder().key("key-" + i).size(100L).build()); + } - final List keysFailedDeletion = new ArrayList<>(); + AtomicBoolean onNext = new AtomicBoolean(false); doAnswer(invocation -> { - DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); - int i = 0; - for (ObjectIdentifier objectIdentifier : deleteObjectsRequest.delete().objects()) { + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (onNext.compareAndSet(false, true)) { + subscriber.onNext(ListObjectsV2Response.builder().contents(s3Objects).build()); + } else { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + // Simulate item-level errors during delete + AtomicInteger deleteCallCount = new AtomicInteger(0); + 
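(Aside: the doAnswer(...) stubs in these tests drive the SDK's paginated publisher by hand: each request(n) emits one canned page, and the Subscription completes after the last one. A standalone sketch of the same pattern, with String pages standing in for ListObjectsV2Response; like the mocks, it deliberately ignores the requested demand n and assumes at least one page:)

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

final class CannedPagePublisher {
    static void subscribe(List<String> pages, Subscriber<? super String> subscriber) {
        AtomicInteger cursor = new AtomicInteger();
        subscriber.onSubscribe(new Subscription() {
            @Override
            public void request(long n) {
                int i = cursor.getAndIncrement();
                if (i < pages.size()) {
                    subscriber.onNext(pages.get(i)); // one page per request() call
                }
                if (i == pages.size() - 1) {
                    subscriber.onComplete(); // signal end-of-listing after the last page
                }
            }

            @Override
            public void cancel() {} // the tests never cancel
        });
    }
}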
when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenAnswer(invocation -> { + DeleteObjectsRequest request = invocation.getArgument(0); + List<S3Error> errors = new ArrayList<>(); + List<DeletedObject> deletedObjects = new ArrayList<>(); + + for (int i = 0; i < request.delete().objects().size(); i++) { if (i % 2 == 0) { - keysFailedDeletion.add(objectIdentifier.key()); + errors.add( + S3Error.builder() + .key(request.delete().objects().get(i).key()) + .code("InternalError") + .message("Simulated error") + .build() + ); + } else { + deletedObjects.add(DeletedObject.builder().key(request.delete().objects().get(i).key()).build()); } - i++; } - return DeleteObjectsResponse.builder() - .errors(keysFailedDeletion.stream().map(key -> S3Error.builder().key(key).build()).collect(Collectors.toList())) - .build(); - }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); + + deleteCallCount.incrementAndGet(); + return CompletableFuture.completedFuture(DeleteObjectsResponse.builder().errors(errors).deleted(deletedObjects).build()); + }); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(AssertionError.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<DeleteResult> resultRef = new AtomicReference<>(); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + resultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + + assertNull("Unexpected exception: " + exceptionRef.get(), exceptionRef.get()); + DeleteResult result = resultRef.get(); + assertNotNull("Expected DeleteResult but got null", result); + + // We expect half of the objects to be deleted successfully. + // But as of today, the blob delete count and bytes are updated a bit earlier.
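(Aside: the expectedDeleteCalls arithmetic in these tests is just a ceiling division of the listed key count by getBulkDeletesSize(), mirroring the batching the removed doDeleteBlobs did explicitly; the S3 DeleteObjects API itself caps a batch at 1,000 keys. A quick worked check:)

// Equivalent to (int) Math.ceil((double) totalObjects / bulkDeleteSize) for
// positive inputs, without a round-trip through floating point.
static int expectedDeleteCalls(int totalObjects, int bulkDeleteSize) {
    return (totalObjects + bulkDeleteSize - 1) / bulkDeleteSize;
}
// expectedDeleteCalls(10, 3) == 4  -> batches of 3, 3, 3, 1 (the item-level-error test)
// expectedDeleteCalls(15, 5) == 3  -> batches of 5, 5, 5   (the happy-path testDelete)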
+ assertEquals(totalObjects, result.blobsDeleted()); + assertEquals(totalObjects * 100L, result.bytesDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + + // Calculate expected number of deleteObjects calls + int expectedDeleteCalls = (int) Math.ceil((double) totalObjects / bulkDeleteSize); + assertEquals(expectedDeleteCalls, deleteCallCount.get()); } - public void testDeleteSdkExceptionDuringListOperation() { + public void testDeleteSdkExceptionDuringListOperation() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onError(new RuntimeException("Simulated listing error")); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(IOException.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected failure but got success"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + assertEquals(IOException.class, exceptionRef.get().getClass()); + assertEquals("Failed to list objects for deletion", exceptionRef.get().getMessage()); } - public void testDeleteSdkExceptionDuringDeleteOperation() { + public void testDeleteSdkExceptionDuringDeleteOperation() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); + int bulkDeleteSize = 5; final S3BlobStore blobStore = mock(S3BlobStore.class); 
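(Aside: all of these failure tests repeat the same await shape: a one-shot CountDownLatch counted down by whichever callback fires, AtomicReferences capturing the outcome, and a bounded await so a hung callback fails fast instead of blocking the test forever. Factored into a generic helper purely for illustration; Callback is a stand-in for ActionListener and is not part of the PR:)

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

final class AsyncAssert {
    // Stand-in for OpenSearch's ActionListener in this self-contained sketch.
    interface Callback<T> { void onResponse(T value); void onFailure(Exception e); }

    static <T> T await(Consumer<Callback<T>> asyncOp) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<T> result = new AtomicReference<>();
        AtomicReference<Exception> failure = new AtomicReference<>();
        asyncOp.accept(new Callback<T>() {
            @Override public void onResponse(T value) { result.set(value); latch.countDown(); }
            @Override public void onFailure(Exception e) { failure.set(e); latch.countDown(); }
        });
        // Bounded wait: a callback that never fires surfaces as an assertion failure.
        if (!latch.await(5, TimeUnit.SECONDS)) throw new AssertionError("callback never fired");
        if (failure.get() != null) throw failure.get();
        return result.get();
    }
}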
when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onNext( + ListObjectsV2Response.builder().contents(S3Object.builder().key("test-key").size(100L).build()).build() + ); + subscriber.onComplete(); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); - when(client.deleteObjects(any(DeleteObjectsRequest.class))).thenThrow(SdkException.builder().build()); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + CompletableFuture failedFuture = new CompletableFuture<>(); + failedFuture.completeExceptionally(new RuntimeException("Simulated delete error")); + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(IOException.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected failure but got success"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + logger.error("", exceptionRef.get()); + assertTrue(exceptionRef.get() instanceof CompletionException); + assertEquals("java.lang.RuntimeException: Simulated delete error", exceptionRef.get().getMessage()); } public void testExecuteSingleUpload() throws IOException { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java index 9ac1564c807c3..c0ee9cb6d980f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java +++ 
b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java @@ -8,6 +8,7 @@ package org.opensearch.repositories.s3; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeUnit; import org.opensearch.common.unit.SizeValue; @@ -25,6 +26,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class S3RepositoryPluginTests extends OpenSearchTestCase { @@ -37,8 +40,6 @@ public void testGetExecutorBuilders() throws IOException { ThreadPool threadPool = null; try (S3RepositoryPlugin plugin = new S3RepositoryPlugin(settings, configPath)) { List> executorBuilders = plugin.getExecutorBuilders(settings); - assertNotNull(executorBuilders); - assertFalse(executorBuilders.isEmpty()); threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); final Executor executor = threadPool.executor(URGENT_FUTURE_COMPLETION); assertNotNull(executor); @@ -57,6 +58,12 @@ public void testGetExecutorBuilders() throws IOException { assertThat(info.getMax(), equalTo(size)); assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(size)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + plugin.createComponents(null, clusterService, threadPool, null, null, null, null, null, null, null, null); + assertNotNull(executorBuilders); + assertFalse(executorBuilders.isEmpty()); + final int availableProcessors = Runtime.getRuntime().availableProcessors(); if (processors > availableProcessors) { assertWarnings( diff --git a/plugins/store-smb/build.gradle b/plugins/store-smb/build.gradle index add4abb22329f..d702978730f45 100644 --- a/plugins/store-smb/build.gradle +++ b/plugins/store-smb/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Store SMB plugin adds support for SMB stores.' - classname 'org.opensearch.plugin.store.smb.SMBStorePlugin' + description = 'The Store SMB plugin adds support for SMB stores.' + classname = 'org.opensearch.plugin.store.smb.SMBStorePlugin' } restResources { restApi { diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 872d928aa093f..54f4f2f897562 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -14,8 +14,8 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Opentelemetry based telemetry implementation.' - classname 'org.opensearch.telemetry.OTelTelemetryPlugin' + description = 'Opentelemetry based telemetry implementation.' 
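// (note) The '=' above and below is Gradle property assignment; presumably these
// opensearchplugin blocks are migrating off the deprecated space-assignment
// syntax (description 'x'), as in the repository-s3 and store-smb changes earlier.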
+ classname = 'org.opensearch.telemetry.OTelTelemetryPlugin' hasClientJar = false } @@ -88,6 +88,7 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.DefaultConfigProperties', 'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 deleted file mode 100644 index ead8fb235fa12..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..b2d1d3575fcde --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 @@ -0,0 +1 @@ +afd2d5781454088400cceabbe84f7a9b29d27161 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 deleted file mode 100644 index b601a4fb5246f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e89de4cb29f16 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 @@ -0,0 +1 @@ +1a708444d2818ac1a47767a2b35d74ef55d26af8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 deleted file mode 100644 index 74b7cb25cdfe5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..df658f4c87ac2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 @@ -0,0 +1 @@ +8cee1fa7ec9129f7b252595c612c19f4570d567f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 deleted file mode 100644 index d8d8f75850cb6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..e6503871bff53 --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 @@ -0,0 +1 @@ +2e2d8f3b51b1a2b1184f11d9059e129c5e39147a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 deleted file mode 100644 index 3e1212943f894..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..65757fff8b0e7 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 @@ -0,0 +1 @@ +a0ef76a383a086b812395ca5a5cdf94804a59a3f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 deleted file mode 100644 index 21a29cc8445e5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..0fc550e83748e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 @@ -0,0 +1 @@ +1122a5ea0562147547ddf0eb28e1035d549c0ea0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 deleted file mode 100644 index ae522ac698aa8..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..a01f85d9e1258 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 @@ -0,0 +1 @@ +abeb93b8b6d2cb0007b1d6122325f94a11e61ca4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 deleted file mode 100644 index a741d0a167d60..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..8c755281bab05 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 @@ -0,0 +1 @@ +32a0fe0fa7cd9831b502075f27c1fe6d28280cdb \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 deleted file mode 100644 index 972e7de1c74be..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..a41c756db7096 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 @@ -0,0 +1 @@ +b3a77fff1084177c4f5099bbb7db6181d6efd752 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 deleted file mode 100644 index c56ca0b9e8169..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..1bd211a143c03 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 @@ -0,0 +1 @@ +1d353ee4e980ff77c742350fc7000b732b6c6b3f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 deleted file mode 100644 index 39db6cb73727f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..084a703a4d4cc --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 @@ -0,0 +1 @@ +1bd9bb4f3ce9ac573613b353a78d51491cd02bbd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 deleted file mode 100644 index 6dcd496e033d3..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..1fe3c4842d41d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 @@ -0,0 +1 @@ +475d900ffd0567a7ddf2452290b2e5d51ac35c58 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 deleted file mode 100644 index 161e400f87077..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..da00b35812afb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 @@ -0,0 +1 @@ +c6e39faabf0741780189861156d0a7763e942796 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 deleted file mode 100644 index e986b4b53388e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..3326c366cb4c9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 @@ -0,0 +1 @@ +613d7f7743eb2b974680ad1af1685802e6a7cb58 \ No newline at end of file diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle new file mode 100644 index 0000000000000..5c6bc8efe1098 --- /dev/null +++ b/plugins/transport-grpc/build.gradle @@ -0,0 +1,168 @@ +import org.gradle.api.attributes.java.TargetJvmEnvironment + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +opensearchplugin { + description = 'gRPC based transport implementation' + classname = 'org.opensearch.transport.grpc.GrpcPlugin' +} + +dependencies { + compileOnly "com.google.code.findbugs:jsr305:3.0.2" + runtimeOnly "com.google.guava:guava:${versions.guava}" + implementation "com.google.errorprone:error_prone_annotations:2.24.1" + implementation "com.google.guava:failureaccess:1.0.1" + implementation "io.grpc:grpc-api:${versions.grpc}" + implementation "io.grpc:grpc-core:${versions.grpc}" + implementation "io.grpc:grpc-netty-shaded:${versions.grpc}" + implementation "io.grpc:grpc-protobuf-lite:${versions.grpc}" + implementation "io.grpc:grpc-protobuf:${versions.grpc}" + implementation "io.grpc:grpc-services:${versions.grpc}" + implementation "io.grpc:grpc-stub:${versions.grpc}" + implementation "io.grpc:grpc-util:${versions.grpc}" + implementation "io.perfmark:perfmark-api:0.26.0" +} + +tasks.named("dependencyLicenses").configure { + mapping from: /grpc-.*/, to: 'grpc' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 
'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'com.google.protobuf.util.Durations', + 'com.google.protobuf.util.Timestamps', + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.google.rpc.Status', + 'com.google.rpc.Status$Builder', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + + 
'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$1', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$2', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$3', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$4', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$6', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess' + ) +} diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 new file mode 100644 index 0000000000000..67723f6f51248 --- /dev/null +++ b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 @@ -0,0 +1 @@ +32b299e45105aa9b0df8279c74dc1edfcf313ff0 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
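The class names that open this section close a thirdPartyAudit block in the gRPC plugin's build script (presumably plugins/transport-grpc/build.gradle; the file header sits above this excerpt). grpc-netty-shaded and its bundled JCTools classes reach into sun.misc.Unsafe, so the audit task is told to tolerate them instead of failing the build. A minimal sketch of the block's shape, with the call name inferred rather than quoted from the diff:

    thirdPartyAudit {
        ignoreViolations(
            // Shaded Netty/JCTools classes that touch sun.misc.Unsafe.
            // Two representative entries; the full list appears in the diff above.
            'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0',
            'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess'
        )
    }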
diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..4798b37e20691 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 @@ -0,0 +1 @@ +1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt b/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/grpc-LICENSE.txt b/plugins/transport-grpc/licenses/grpc-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/grpc-NOTICE.txt b/plugins/transport-grpc/licenses/grpc-NOTICE.txt new file mode 100644 index 0000000000000..f70c5620cf75a --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-NOTICE.txt @@ -0,0 +1,62 @@ +Copyright 2014 The gRPC Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +----------------------------------------------------------------------- + +This product contains a modified portion of 'OkHttp', an open source +HTTP & SPDY client for Android and Java applications, which can be obtained +at: + + * LICENSE: + * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/square/okhttp + * LOCATION_IN_GRPC: + * okhttp/third_party/okhttp + +This product contains a modified portion of 'Envoy', an open source +cloud-native high-performance edge/middle/service proxy, which can be +obtained at: + + * LICENSE: + * xds/third_party/envoy/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/envoy/NOTICE + * HOMEPAGE: + * https://www.envoyproxy.io + * LOCATION_IN_GRPC: + * xds/third_party/envoy + +This product contains a modified portion of 'protoc-gen-validate (PGV)', +an open source protoc plugin to generate polyglot message validators, +which can be obtained at: + + * LICENSE: + * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/protoc-gen-validate/NOTICE + * HOMEPAGE: + * https://github.com/envoyproxy/protoc-gen-validate + * LOCATION_IN_GRPC: + * xds/third_party/protoc-gen-validate + +This product contains a modified portion of 'udpa', +an open source universal data plane API, which can be obtained at: + + * LICENSE: + * xds/third_party/udpa/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/cncf/udpa + * LOCATION_IN_GRPC: + * xds/third_party/udpa diff --git a/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e20345d29e914 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 @@ -0,0 +1 @@ +b0fd51a1c029785d1c9ae2cfc80a296b60dfcfdb \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..53fa705a66129 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 @@ -0,0 +1 @@ +8ea4186fbdcc5432664364ed53e03cf0d458c3ec \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e861b41837f33 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 @@ -0,0 +1 @@ +35b28e0d57874021cd31e76dd4a795f76a82471e \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..b2401f9752829 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a53064b896adcfefe74362a33e111492351dfc03 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..c4edf923791e5 --- /dev/null +++ 
b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 @@ -0,0 +1 @@ +6c2a0b0640544b9010a42bcf76f2791116a75c9d \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..118464f8f48ff --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 @@ -0,0 +1 @@ +d58ee1cf723b4b5536d44b67e328c163580a8d98 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..c3261b012e502 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 @@ -0,0 +1 @@ +2d195570e9256d1357d584146a8e6b19587d4044 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 new file mode 100644 index 0000000000000..27d5304e326df --- /dev/null +++ b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 @@ -0,0 +1 @@ +818e780da2c66c63bbb6480fef1f3855eeafa3e4 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/guava-LICENSE.txt b/plugins/transport-grpc/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/guava-NOTICE.txt b/plugins/transport-grpc/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 new file mode 100644 index 0000000000000..abf1becd13298 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 @@ -0,0 +1 @@ +ef65452adaf20bf7d12ef55913aba24037b82738 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
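GrpcPlugin.java, added below, wires Netty4GrpcServerTransport into the new auxiliary-transport extension point and registers its settings. As a hedged sketch of how that hook is consumed (not code from this change; the collaborators are assumed to be supplied by the node at bootstrap):

    // Hypothetical consumer of NetworkPlugin#getAuxTransports.
    static NetworkPlugin.AuxTransport grpcTransport(
        ThreadPool threadPool,
        CircuitBreakerService circuitBreakerService,
        NetworkService networkService,
        ClusterSettings clusterSettings,
        Tracer tracer
    ) {
        GrpcPlugin plugin = new GrpcPlugin();
        Map<String, Supplier<NetworkPlugin.AuxTransport>> transports = plugin.getAuxTransports(
            Settings.EMPTY, threadPool, circuitBreakerService, networkService, clusterSettings, tracer
        );
        // The plugin keys its transport by GRPC_TRANSPORT_SETTING_KEY; the supplier
        // builds an unstarted Netty4GrpcServerTransport from the node settings.
        return transports.get(Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY).get();
    }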
diff --git a/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt new file mode 100644 index 0000000000000..7d74b6569cf64 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt @@ -0,0 +1,40 @@ +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +----------------------------------------------------------------------- + +This product contains a modified portion of 'Catapult', an open source +Trace Event viewer for Chrome, Linux, and Android applications, which can +be obtained at: + + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/catapult/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/catapult-project/catapult + +This product contains a modified portion of 'Polymer', a library for Web +Components, which can be obtained at: + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/polymer/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/Polymer/polymer + + +This product contains a modified portion of 'ASM', an open source +Java Bytecode library, which can be obtained at: + + * LICENSE: + * agent/src/main/resources/io/perfmark/agent/third_party/asm/LICENSE (BSD style License) + * HOMEPAGE: + * https://asm.ow2.io/ diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java new file mode 100644 index 0000000000000..0a464e135350b --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ +package org.opensearch.transport.grpc; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; + +/** + * Main class for the gRPC plugin. + */ +public final class GrpcPlugin extends Plugin implements NetworkPlugin { + + /** + * Creates a new GrpcPlugin instance. + */ + public GrpcPlugin() {} + + @Override + public Map<String, Supplier<AuxTransport>> getAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.singletonMap( + GRPC_TRANSPORT_SETTING_KEY, + () -> new Netty4GrpcServerTransport(settings, Collections.emptyList(), networkService) + ); + } + + @Override + public List<Setting<?>> getSettings() { + return List.of( + SETTING_GRPC_PORTS, + SETTING_GRPC_HOST, + SETTING_GRPC_PUBLISH_HOST, + SETTING_GRPC_BIND_HOST, + SETTING_GRPC_WORKER_COUNT, + SETTING_GRPC_PUBLISH_PORT + ); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java new file mode 100644 index 0000000000000..61c0722772b92 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java @@ -0,0 +1,277 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.transport.grpc; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.transport.BindTransportException; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import io.grpc.BindableService; +import io.grpc.InsecureServerCredentials; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup; +import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup; +import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioServerSocketChannel; +import io.grpc.protobuf.services.HealthStatusManager; +import io.grpc.protobuf.services.ProtoReflectionService; + +import static java.util.Collections.emptyList; +import static org.opensearch.common.settings.Setting.intSetting; +import static org.opensearch.common.settings.Setting.listSetting; +import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; +import static org.opensearch.transport.Transport.resolveTransportPublishPort; + +/** + * Netty4 gRPC server implemented as a LifecycleComponent. + * Services are injected through the BindableService list. + */ +public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport { + private static final Logger logger = LogManager.getLogger(Netty4GrpcServerTransport.class); + + /** + * Type key for configuring settings of this auxiliary transport. + */ + public static final String GRPC_TRANSPORT_SETTING_KEY = "experimental-transport-grpc"; + + /** + * Port range on which to bind. + * Note this setting is configured through the AffixSetting AUX_TRANSPORT_PORTS, where the aux transport type matches GRPC_TRANSPORT_SETTING_KEY. + */ + public static final Setting<PortsRange> SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace( + GRPC_TRANSPORT_SETTING_KEY + ); + + /** + * Port published to peers for this server. + */ + public static final Setting<Integer> SETTING_GRPC_PUBLISH_PORT = intSetting("grpc.publish_port", -1, -1, Setting.Property.NodeScope); + + /** + * Host list to bind and publish. + * For distinct bind/publish hosts configure SETTING_GRPC_BIND_HOST + SETTING_GRPC_PUBLISH_HOST separately. + */ + public static final Setting<List<String>> SETTING_GRPC_HOST = listSetting( + "grpc.host", + emptyList(), + Function.identity(), + Setting.Property.NodeScope + ); + + /** + * Host list to bind. + */ + public static final Setting<List<String>> SETTING_GRPC_BIND_HOST = listSetting( + "grpc.bind_host", + SETTING_GRPC_HOST, + Function.identity(), + Setting.Property.NodeScope + ); + + /** + * Host list published to peers.
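+ * Presumably defaults to the value of {@code grpc.host} (SETTING_GRPC_HOST) when not explicitly configured, per the listSetting fallback below.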
+ */ + public static final Setting<List<String>> SETTING_GRPC_PUBLISH_HOST = listSetting( + "grpc.publish_host", + SETTING_GRPC_HOST, + Function.identity(), + Setting.Property.NodeScope + ); + + /** + * Configure size of thread pool backing this transport server. + */ + public static final Setting<Integer> SETTING_GRPC_WORKER_COUNT = new Setting<>( + "grpc.netty.worker_count", + (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)), + (s) -> Setting.parseInt(s, 1, "grpc.netty.worker_count"), + Setting.Property.NodeScope + ); + + private final Settings settings; + private final NetworkService networkService; + private final List<BindableService> services; + private final CopyOnWriteArrayList<Server> servers = new CopyOnWriteArrayList<>(); + private final String[] bindHosts; + private final String[] publishHosts; + private final PortsRange port; + private final int nettyEventLoopThreads; + + private volatile BoundTransportAddress boundAddress; + private volatile EventLoopGroup eventLoopGroup; + + /** + * Creates a new Netty4GrpcServerTransport instance. + * @param settings the configured settings. + * @param services the gRPC compatible services to be registered with the server. + * @param networkService the bind/publish addresses. + */ + public Netty4GrpcServerTransport(Settings settings, List<BindableService> services, NetworkService networkService) { + this.settings = Objects.requireNonNull(settings); + this.services = Objects.requireNonNull(services); + this.networkService = Objects.requireNonNull(networkService); + + final List<String> httpBindHost = SETTING_GRPC_BIND_HOST.get(settings); + this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray( + Strings.EMPTY_ARRAY + ); + + final List<String> httpPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings); + this.publishHosts = (httpPublishHost.isEmpty() ?
NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost) + .toArray(Strings.EMPTY_ARRAY); + + this.port = SETTING_GRPC_PORTS.get(settings); + this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings); + } + + BoundTransportAddress boundAddress() { + return this.boundAddress; + } + + @Override + protected void doStart() { + boolean success = false; + try { + this.eventLoopGroup = new NioEventLoopGroup(nettyEventLoopThreads, daemonThreadFactory(settings, "grpc_event_loop")); + bindServer(); + success = true; + logger.info("Started gRPC server on port {}", port); + } finally { + if (!success) { + doStop(); + } + } + } + + @Override + protected void doStop() { + for (Server server : servers) { + if (server != null) { + server.shutdown(); + try { + server.awaitTermination(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Interrupted while shutting down gRPC server"); + } finally { + server.shutdownNow(); + } + } + } + if (eventLoopGroup != null) { + try { + eventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Failed to shut down event loop group"); + } + } + } + + @Override + protected void doClose() { + + } + + private void bindServer() { + InetAddress[] hostAddresses; + try { + hostAddresses = networkService.resolveBindHostAddresses(bindHosts); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); + } + + List<TransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length); + for (InetAddress address : hostAddresses) { + boundAddresses.add(bindAddress(address, port)); + } + + final InetAddress publishInetAddress; + try { + publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + + final int publishPort = resolveTransportPublishPort(SETTING_GRPC_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress); + if (publishPort < 0) { + throw new BindTransportException( + "Failed to auto-resolve grpc publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). 
" + + "Please specify a unique port by setting " + + SETTING_GRPC_PORTS.getKey() + + " or " + + SETTING_GRPC_PUBLISH_PORT.getKey() + ); + } + + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); + logger.info("{}", boundAddress); + } + + private TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRange) { + AtomicReference lastException = new AtomicReference<>(); + AtomicReference addr = new AtomicReference<>(); + + boolean success = portRange.iterate(portNumber -> { + try { + + final InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber); + final NettyServerBuilder serverBuilder = NettyServerBuilder.forAddress(address, InsecureServerCredentials.create()) + .bossEventLoopGroup(eventLoopGroup) + .workerEventLoopGroup(eventLoopGroup) + .channelType(NioServerSocketChannel.class) + .addService(new HealthStatusManager().getHealthService()) + .addService(ProtoReflectionService.newInstance()); + + services.forEach(serverBuilder::addService); + + Server srv = serverBuilder.build().start(); + servers.add(srv); + addr.set(new TransportAddress(hostAddress, portNumber)); + logger.debug("Bound gRPC to address {{}}", address); + return true; + } catch (Exception e) { + lastException.set(e); + return false; + } + }); + + if (!success) { + throw new RuntimeException("Failed to bind to " + hostAddress + " on ports " + portRange, lastException.get()); + } + + return addr.get(); + } +} diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java new file mode 100644 index 0000000000000..4a5d9d02b5b91 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * gRPC transport implementation for OpenSearch. + * Provides network communication using the gRPC protocol. + */ +package org.opensearch.transport.grpc; diff --git a/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..398de576b6c5a --- /dev/null +++ b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java new file mode 100644 index 0000000000000..4a5d9d02b5b91 --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * gRPC transport implementation for OpenSearch. + * Provides network communication using the gRPC protocol. + */ +package org.opensearch.transport.grpc; diff --git a/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..398de576b6c5a --- /dev/null +++ b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant codeBase "${codebase.grpc-netty-shaded}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; + + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; + + // Netty sets custom classloader for some of its internal threads + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java new file mode 100644 index 0000000000000..ebeff62c2c23c --- /dev/null +++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport.grpc; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; +import org.junit.Before; + +import java.util.List; + +import io.grpc.BindableService; + +import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.not; + +public class Netty4GrpcServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private List<BindableService> services; + + @Before + public void setup() { + networkService = new NetworkService(List.of()); + services = List.of(); + } + + public void test() { + try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) { + transport.start(); + + MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray())); + assertNotNull(transport.boundAddress().publishAddress().address()); + + transport.stop(); + } + } + + private static Settings createSettings() { + return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build(); + } +} diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 7132c97864238..6ac27b51f8902 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -34,8 +34,8 @@ apply plugin: "opensearch.publish" apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The nio transport.' - classname 'org.opensearch.transport.nio.NioTransportPlugin' + description = 'The nio transport.'
+ classname = 'org.opensearch.transport.nio.NioTransportPlugin' hasClientJar = true } diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 1e76d1a29efc1..12ae5ce99632e 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -23,8 +23,8 @@ apply plugin: 'opensearch.internal-cluster-test' apply plugin: 'opensearch.publish' opensearchplugin { - description 'Reactor Netty 4 based transport implementation' - classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin' + description = 'Reactor Netty 4 based transport implementation' + classname = 'org.opensearch.transport.reactor.ReactorNetty4Plugin' hasClientJar = true } diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle index ad6737bbd24b0..2e8b0df468092 100644 --- a/plugins/workload-management/build.gradle +++ b/plugins/workload-management/build.gradle @@ -14,8 +14,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'OpenSearch Workload Management Plugin.' - classname 'org.opensearch.plugin.wlm.WorkloadManagementPlugin' + description = 'OpenSearch Workload Management Plugin.' + classname = 'org.opensearch.plugin.wlm.WorkloadManagementPlugin' } dependencies { diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index db8762fe921bf..a3e5f295001bc 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -16,8 +16,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.opensearchplugin' opensearchplugin { - description 'Die with dignity plugin' - classname 'org.opensearch.DieWithDignityPlugin' + description = 'Die with dignity plugin' + classname = 'org.opensearch.DieWithDignityPlugin' } // let the javaRestTest see the classpath of main diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index e50ca63c3da69..91c30d27fe8a9 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -55,7 +55,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") @@ -65,7 +65,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" dependsOn "${baseName}#oldClusterTest" doFirst { testClusters."${baseName}".goToNextVersion() diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 822977c55368a..9148f5a3ba3e6 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -69,7 +69,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 907791bd6a7de..a0a271fa01fb3 100644 --- a/qa/multi-cluster-search/build.gradle +++ 
b/qa/multi-cluster-search/build.gradle @@ -49,7 +49,7 @@ testClusters.'remote-cluster' { } task mixedClusterTest(type: RestIntegTestTask) { - useCluster testClusters.'remote-cluster' + useCluster project, testClusters.'remote-cluster' dependsOn 'remote-cluster' systemProperty 'tests.rest.suite', 'multi_cluster' } diff --git a/qa/remote-clusters/build.gradle b/qa/remote-clusters/build.gradle index 2f3cd9d2d898d..a52d4f2035bea 100644 --- a/qa/remote-clusters/build.gradle +++ b/qa/remote-clusters/build.gradle @@ -59,7 +59,7 @@ tasks.named("preProcessFixture").configure { } doLast { // tests expect to have an empty repo - project.delete( + delete( "${buildDir}/repo" ) createAndSetWritable( diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index fdde0997df371..13d55fe4a4a41 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -62,7 +62,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#Step1OldClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${oldClusterName}" + useCluster project, testClusters."${oldClusterName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") @@ -71,7 +71,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#Step2NewClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${newClusterName}" + useCluster project, testClusters."${newClusterName}" dependsOn "${baseName}#Step1OldClusterTest" systemProperty 'tests.rest.suite', 'step2' } @@ -80,13 +80,13 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { // since the ES cluster would not be able to read snapshots from OpenSearch cluster in Step 3. 
if (bwcVersion.after('7.10.2')) { tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${oldClusterName}" - dependsOn "${baseName}#Step2NewClusterTest" - systemProperty 'tests.rest.suite', 'step3' - } + useCluster project, testClusters."${oldClusterName}" + dependsOn "${baseName}#Step2NewClusterTest" + systemProperty 'tests.rest.suite', 'step3' + } tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${newClusterName}" + useCluster project, testClusters."${newClusterName}" dependsOn "${baseName}#Step3OldClusterTest" systemProperty 'tests.rest.suite', 'step4' } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 68a9dcafbdc47..ff8aecc6c3f69 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -67,7 +67,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { dependsOn processTestResources - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") @@ -81,7 +81,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { tasks.register("${baseName}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oldClusterTest" - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" doFirst { testClusters."${baseName}".nextNodeToNextVersion() } @@ -94,7 +94,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { tasks.register("${baseName}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oneThirdUpgradedTest" - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" doFirst { testClusters."${baseName}".nextNodeToNextVersion() } @@ -110,7 +110,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { doFirst { testClusters."${baseName}".nextNodeToNextVersion() } - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" systemProperty 'tests.repo_location', "${buildDir}/cluster/shared/repo/${baseName}/test" systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', bwcVersionStr diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 25261f5e3ff7d..af389a7c59835 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -47,7 +47,7 @@ testClusters.integTest { integTest { doFirst { - project.delete(repo) + delete(repo) repo.mkdirs() } } diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 27a3b07157d21..46dcfce18c726 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -51,7 +51,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#integTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json index 74b7055b4c4b0..9d3d420e8945c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json @@ -62,6 +62,9 @@ "default":"open", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." } + }, + "body":{ + "description":"The search source (in order to specify slice parameters)" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml index 9ec39660a4928..d9a95ad5a0815 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml @@ -44,6 +44,8 @@ setup: {"order":"order7","issue":{"labels":{"number":7,"name":"abc7","status":1}}} {"index":{"_index":"flat_object_doc_values_test","_id":"8"}} {"order":"order8","issue":{"labels":{"number":8,"name":"abc8","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"9"}} + {"order":"order9","issue":{"labels":{"number":9,"name":"abC8","status":1}}} --- # Delete Index when connection is teardown @@ -67,7 +69,53 @@ teardown: } } - - length: { hits.hits: 9 } + - length: { hits.hits: 10 } + + # Case Insensitive Term Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels.name: { + value: "abc8", + case_insensitive: "true" + } + } + } + ] + } + } + } + + - length: { hits.hits: 2 } + + # Case Insensitive Term Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels: { + value: "abc8", + case_insensitive: "true" + } + } + } + ] + } + } + } + + - length: { hits.hits: 2 } # Term Query with exact dot path. 
- do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml index f8fa537ed91bf..8b1956c6152d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml @@ -125,3 +125,40 @@ setup: - match: { aggregations.conns.buckets.3.doc_count: 1 } - match: { aggregations.conns.buckets.3.key: "4" } + + +--- +"Show only intersections": + - skip: + version: " - 2.19.0" + reason: "show_only_intersecting was added in 2.19.0" + features: node_selector + - do: + node_selector: + version: "2.19.0 - " + search: + index: test + rest_total_hits_as_int: true + body: + size: 0 + aggs: + conns: + adjacency_matrix: + show_only_intersecting: true + filters: + 1: + term: + num: 1 + 2: + term: + num: 2 + 4: + term: + num: 4 + + - match: { hits.total: 3 } + + - length: { aggregations.conns.buckets: 1 } + + - match: { aggregations.conns.buckets.0.doc_count: 1 } + - match: { aggregations.conns.buckets.0.key: "1&2" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml new file mode 100644 index 0000000000000..eccafaf96dd23 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml @@ -0,0 +1,136 @@ +setup: + - do: + indices.create: + index: double_sort + body: + settings: + number_of_shards: 3 + number_of_replicas: 0 + mappings: + properties: + field: + type: double + +--- +"test sorting against double only fields": + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "double_sort", "_id" : "1" } }' + - '{"field" : [ 900719925474099.1, 1.1 ] }' + - '{ "index" : { "_index" : "double_sort", "_id" : "2" } }' + - '{"field" : [ 900719925474099.2, 900719925474099.3 ] }' + - '{ "index" : { "_index" : "double_sort", "_id" : "3" } }' + - '{"field" : [ 450359962737049.4, 3.5, 4.6 ] }' + - '{ "index" : { "_index" : "double_sort", "_id" : "4" } }' + - '{"field" : [ 450359962737049.7, 5.8, -1.9, -2.0 ] }' + + - do: + search: + index: double_sort + body: + size: 5 + sort: [{ field: { mode: max, order: desc } } ] + - match: {hits.total.value: 4 } + - length: {hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.0.sort.0: 900719925474099.2 } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 900719925474099.1 } + - match: { hits.hits.2._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.2.sort.0: 450359962737049.7 } + - match: { hits.hits.3._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.3.sort.0: 450359962737049.4 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: max, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.0.sort.0: 450359962737049.4 } + - match: { hits.hits.1._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.1.sort.0: 450359962737049.7 } + - match: { hits.hits.2._source.field: [ 900719925474099.1, 1.1 ] 
} + - match: { hits.hits.2.sort.0: 900719925474099.1 } + - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.3.sort.0: 900719925474099.2 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: min, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.0.sort: [ -2.0 ] } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 1.1 } + - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.2.sort.0: 3.5 } + - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.3.sort.0: 900719925474099.2 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: median, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.0.sort.0: 900719925474099.2 } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 450359962737050.1 } + - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.2.sort.0: 4.6 } + - match: { hits.hits.3._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.3.sort.0: 1.95 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: avg, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.0.sort.0: 112589990684262.89 } + - match: { hits.hits.1._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.1.sort.0: 150119987579019.16 } + - match: { hits.hits.2._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.2.sort.0: 450359962737050.1 } + - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.3.sort.0: 900719925474099.2 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: sum, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.0.sort.0: 1801439850948198.5 } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 900719925474100.2 } + - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.2.sort.0: 450359962737057.5 } + - match: { hits.hits.3._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.3.sort.0: 450359962737051.56 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml new file mode 100644 index 0000000000000..f354dff6cbf02 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml @@ -0,0 +1,137 @@ +setup: + - do: + indices.create: + index: long_sort + body: + settings: + number_of_shards: 3 + number_of_replicas: 0 + 
mappings: + properties: + field: + type: long + +--- +"test sorting against long only fields": + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "long_sort", "_id" : "1" } }' + - '{"field" : [ 9223372036854775807, 1 ] }' + - '{ "index" : { "_index" : "long_sort", "_id" : "2" } }' + - '{"field" : [ 922337203685477777, 2 ] }' + - '{ "index" : { "_index" : "long_sort", "_id" : "3" } }' + - '{"field" : [ 2147483647, 3, 4 ] }' + - '{ "index" : { "_index" : "long_sort", "_id" : "4" } }' + - '{"field" : [ 2147483648, 5, -1, -2 ] }' + + - do: + search: + index: long_sort + body: + size: 5 + sort: [{ field: { mode: max, order: desc } } ] + - match: {hits.total.value: 4 } + - length: {hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.0.sort.0: 9223372036854775807 } + - match: { hits.hits.1._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.1.sort.0: 922337203685477777 } + - match: { hits.hits.2._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.2.sort.0: 2147483648 } + - match: { hits.hits.3._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.3.sort.0: 2147483647 } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: max, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.0.sort.0: 2147483647 } + - match: { hits.hits.1._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.1.sort.0: 2147483648 } + - match: { hits.hits.2._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.2.sort.0: 922337203685477777 } + - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.3.sort.0: 9223372036854775807 } + + + - do: + search: + index: long_sort + body: + size: 5 + sort: [{ field: { mode: min, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.0.sort.0: 3 } + - match: { hits.hits.1._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.1.sort.0: 2 } + - match: { hits.hits.2._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.2.sort.0: 1 } + - match: { hits.hits.3._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.3.sort: [ -2 ] } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: median, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.0.sort.0: 2 } + - match: { hits.hits.1._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.1.sort.0: 4 } + - match: { hits.hits.2._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.2.sort.0: 461168601842738880 } + - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.3.sort.0: 4611686018427387904 } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: avg, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.0.sort.0: 
461168601842738880 } + - match: { hits.hits.1._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.1.sort.0: 715827885 } + - match: { hits.hits.2._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.2.sort.0: 536870913 } + - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.3.sort: [ -4611686018427387904 ] } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: sum, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.0.sort: [ -9223372036854775808 ] } + - match: { hits.hits.1._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.1.sort.0: 2147483650 } + - match: { hits.hits.2._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.2.sort.0: 2147483654 } + - match: { hits.hits.3._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.3.sort.0: 922337203685477779 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml new file mode 100644 index 0000000000000..056b2f58b2229 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml @@ -0,0 +1,167 @@ +setup: + - do: + indices.create: + index: unsigned_long_sort + body: + settings: + number_of_shards: 3 + number_of_replicas: 0 + mappings: + properties: + field: + type: unsigned_long + +--- +"test sorting against unsigned_long only fields": + - skip: + version: " - 2.19.99" + reason: "this change is added in 3.0.0" + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "1" } }' + - '{"field" : [ 13835058055282163712, 1 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "2" } }' + - '{"field" : [ 13835058055282163713, 13835058055282163714 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "3" } }' + - '{"field" : [ 13835058055282163715, 13835058055282163716, 2 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "4" } }' + - '{"field" : [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "5" } }' + - '{"field" : [ 13835058055282163720, 13835058055282163721, 3, 4 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "6" } }' + - '{"field" : [ 13835058055282163722, 5, 6, 7 ] }' + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [{ field: { mode: max, order: desc } } ] + - match: {hits.total.value: 6 } + - length: {hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.0.sort.0: 13835058055282163722 } + - match: { hits.hits.1._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.1.sort.0: 13835058055282163721 } + - match: { hits.hits.2._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.2.sort.0: 13835058055282163719 } + - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.3.sort.0: 13835058055282163716 } + - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.4.sort.0: 
13835058055282163714 } + - match: { hits.hits.5._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.5.sort.0: 13835058055282163712 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [{ field: { mode: max, order: asc } } ] + - match: {hits.total.value: 6 } + - length: {hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.0.sort.0: 13835058055282163712 } + - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.1.sort.0: 13835058055282163714 } + - match: { hits.hits.2._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.2.sort.0: 13835058055282163716 } + - match: { hits.hits.3._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.3.sort.0: 13835058055282163719 } + - match: { hits.hits.4._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.4.sort.0: 13835058055282163721 } + - match: { hits.hits.5._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.5.sort.0: 13835058055282163722 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: median, order: asc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.0.sort.0: 7 } + - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.1.sort.0: 4611686018427387906 } + - match: { hits.hits.2._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.2.sort.0: 6917529027641081857 } + - match: { hits.hits.3._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.3.sort.0: 6917529027641081862 } + - match: { hits.hits.4._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.4.sort.0: 13835058055282163715 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 13835058055282163718 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: sum, order: desc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.0.sort.0: 13835058055282163740 } + - match: { hits.hits.1._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.1.sort.0: 13835058055282163713 } + - match: { hits.hits.2._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.2.sort.0: 9223372036854775832 } + - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.3.sort.0: 9223372036854775817 } + - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.4.sort.0: 9223372036854775811 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 4611686018427387922 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: avg, order: desc } } ] + - 
match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.0.sort.0: 6917529027641081857 } + - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.1.sort.0: 4611686018427387906 } + - match: { hits.hits.2._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.2.sort.0: 3458764513820540935 } + - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.3.sort.0: 3074457345618258606 } + - match: { hits.hits.4._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.4.sort.0: 2305843009213693958 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 1537228672809129307 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: min, order: asc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.0.sort.0: 1 } + - match: { hits.hits.1._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.1.sort.0: 2 } + - match: { hits.hits.2._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.2.sort.0: 3 } + - match: { hits.hits.3._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.3.sort.0: 5 } + - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.4.sort.0: 13835058055282163713 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 13835058055282163717 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml index ed8aa2c76ee92..4bdb13e0341be 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -1126,8 +1126,8 @@ setup: "search on fields with only doc_values enabled": - skip: features: [ "headers" ] - version: " - 2.18.99" - reason: "searching with only doc_values was finally added in 2.19.0" + version: " - 2.99.99" + reason: "searching with only doc_values was finally added in 3.0.0" - do: indices.create: index: test-doc-values @@ -1198,6 +1198,37 @@ setup: - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2", "boolean": true, "date_nanos": "2020-10-29T12:12:12.123456789Z", "date": "2020-10-29T12:12:12.987Z" }' - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3", "boolean": false, "date_nanos": "2024-10-29T12:12:12.123456789Z", "date": "2024-10-29T12:12:12.987Z" }' + - '{ "index": { "_index": "test-doc-values", "_id": "4" } }' + - '{ 
"some_keyword": "Keyword1" }' + - '{ "index": { "_index": "test-doc-values", "_id": "5" } }' + - '{ "some_keyword": "keyword1" }' + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: { + "some_keyword": { + "value": "Keyword1" + } } + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: { + "some_keyword": { + "value": "keyword1", + "case_insensitive": "true" + } } + + - match: { hits.total: 2 } - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml new file mode 100644 index 0000000000000..dafb38df20157 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml @@ -0,0 +1,88 @@ +--- +"Search shards with slice specified in body": + - skip: + version: " - 2.18.99" + reason: "Added slice body to search_shards in 2.19" + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 7 + number_of_replicas: 0 + + - do: + search_shards: + index: test_index + body: + slice: + id: 0 + max: 3 + - length: { shards: 3 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 0 } + - match: { shards.1.0.shard: 3 } + - match: { shards.2.0.shard: 6 } + + - do: + search_shards: + index: test_index + body: + slice: + id: 1 + max: 3 + - length: { shards: 2 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 1 } + - match: { shards.1.0.shard: 4 } + + - do: + search_shards: + index: test_index + body: + slice: + id: 2 + max: 3 + - length: { shards: 2 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 2 } + - match: { shards.1.0.shard: 5 } + + + - do: + search_shards: + index: test_index + preference: "_shards:0,2,4,6" + body: + slice: + id: 0 + max: 3 + - length: { shards: 2 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 0 } + - match: { shards.1.0.shard: 6 } + + - do: + search_shards: + index: test_index + preference: "_shards:0,2,4,6" + body: + slice: + id: 1 + max: 3 + - length: { shards: 1 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 2 } + + - do: + search_shards: + index: test_index + preference: "_shards:0,2,4,6" + body: + slice: + id: 2 + max: 3 + - length: { shards: 1 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 4 } diff --git a/sandbox/plugins/build.gradle b/sandbox/plugins/build.gradle index 61afb2c568e1b..1b7b6889972fd 100644 --- a/sandbox/plugins/build.gradle +++ b/sandbox/plugins/build.gradle @@ -12,8 +12,8 @@ configure(subprojects.findAll { it.parent.path == project.path }) { apply plugin: 'opensearch.opensearchplugin' opensearchplugin { - name project.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = project.name + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } } diff --git a/server/build.gradle b/server/build.gradle index 55b9c017d3598..1b40fc980a818 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -42,7 +42,7 @@ plugins { publishing { publications { nebula(MavenPublication) { - artifactId 'opensearch' + artifactId = 'opensearch' } } } @@ -74,60 +74,46 @@ dependencies { compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly 
project(':libs:opensearch-plugin-classloader') - // lucene - api "org.apache.lucene:lucene-core:${versions.lucene}" - api "org.apache.lucene:lucene-analysis-common:${versions.lucene}" - api "org.apache.lucene:lucene-backward-codecs:${versions.lucene}" - api "org.apache.lucene:lucene-grouping:${versions.lucene}" - api "org.apache.lucene:lucene-highlighter:${versions.lucene}" - api "org.apache.lucene:lucene-join:${versions.lucene}" - api "org.apache.lucene:lucene-memory:${versions.lucene}" - api "org.apache.lucene:lucene-misc:${versions.lucene}" - api "org.apache.lucene:lucene-queries:${versions.lucene}" - api "org.apache.lucene:lucene-queryparser:${versions.lucene}" - api "org.apache.lucene:lucene-sandbox:${versions.lucene}" - api "org.apache.lucene:lucene-spatial-extras:${versions.lucene}" - api "org.apache.lucene:lucene-spatial3d:${versions.lucene}" - api "org.apache.lucene:lucene-suggest:${versions.lucene}" + api libs.bundles.lucene // utilities api project(":libs:opensearch-cli") // time handling, remove with java 8 time - api "joda-time:joda-time:${versions.joda}" + api libs.jodatime // percentiles aggregation - api "com.tdunning:t-digest:${versions.tdigest}" + api libs.tdigest // percentile ranks aggregation - api "org.hdrhistogram:HdrHistogram:${versions.hdrhistogram}" + api libs.hdrhistogram // lucene spatial - api "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional - api "org.locationtech.jts:jts-core:${versions.jts}", optional + api libs.spatial4j, optional + api libs.jtscore, optional // logging - api "org.apache.logging.log4j:log4j-api:${versions.log4j}" - api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" - api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional - annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api libs.log4japi + api libs.log4jjul + api libs.log4jcore, optional + annotationProcessor libs.log4jcore annotationProcessor project(':libs:opensearch-common') // jna - api "net.java.dev.jna:jna:${versions.jna}" + api libs.jna // jcraft - api "com.jcraft:jzlib:${versions.jzlib}" + api libs.jzlib // reactor - api "io.projectreactor:reactor-core:${versions.reactor}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + api libs.reactorcore + api libs.reactivestreams // protobuf - api "com.google.protobuf:protobuf-java:${versions.protobuf}" - api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + api libs.protobuf + api libs.jakartaannotation // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap - implementation 'org.roaringbitmap:RoaringBitmap:1.3.0' + api libs.roaringbitmap testImplementation(project(":test:framework")) { // tests use the locally compiled version of server diff --git a/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 deleted file mode 100644 index fd952034f3742..0000000000000 --- a/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c2503cfaba37249e20ea877555cb52ee89d1ae1 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2b9a8cf6e43fd --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 @@ -0,0 +1 @@ +86836497e35c1ab33259d9864ceb280c0016075e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 
b/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 deleted file mode 100644 index 2993134edd610..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68fe98c94e9644a584ea1bf525e68d9406fc61ec \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..89d6ddbec3eec --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d0e79d06a0ed021663737e4df777ab7b80cd28c4 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.0.jar.sha1 b/server/licenses/lucene-core-9.12.0.jar.sha1 deleted file mode 100644 index e55f896dedb63..0000000000000 --- a/server/licenses/lucene-core-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fdb055d569bb20bfce9618fe2b01c29bab7f290c \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.1.jar.sha1 b/server/licenses/lucene-core-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2521c91a81d64 --- /dev/null +++ b/server/licenses/lucene-core-9.12.1.jar.sha1 @@ -0,0 +1 @@ +91447c90c1180122142773b5baddaf8547124794 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.0.jar.sha1 b/server/licenses/lucene-grouping-9.12.0.jar.sha1 deleted file mode 100644 index 48388974bb38f..0000000000000 --- a/server/licenses/lucene-grouping-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ccf99f8db57aa97b2c1f95c5cc2a11156a043921 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.1.jar.sha1 b/server/licenses/lucene-grouping-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..61d7ff62ac3cc --- /dev/null +++ b/server/licenses/lucene-grouping-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e4bc3d0aa7eec4f41b4f350de0263a8d5625d2b3 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.0.jar.sha1 b/server/licenses/lucene-highlighter-9.12.0.jar.sha1 deleted file mode 100644 index 3d457579da892..0000000000000 --- a/server/licenses/lucene-highlighter-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e93429f66fbcd3b58d81f01223d6ce5688047296 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.1.jar.sha1 b/server/licenses/lucene-highlighter-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..57fc10a58b806 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.12.1.jar.sha1 @@ -0,0 +1 @@ +2eeedfcec47dd65969f36e88931ed452291dd43e \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.0.jar.sha1 b/server/licenses/lucene-join-9.12.0.jar.sha1 deleted file mode 100644 index c5f6d16598a60..0000000000000 --- a/server/licenses/lucene-join-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14c802d6955eaf11107375a2ada8fe8ec53b3e01 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.1.jar.sha1 b/server/licenses/lucene-join-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..8d46f20c39974 --- /dev/null +++ b/server/licenses/lucene-join-9.12.1.jar.sha1 @@ -0,0 +1 @@ +3c5e9ff2925a8373ae0d35c1d0a7b2465cebec9f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.0.jar.sha1 b/server/licenses/lucene-memory-9.12.0.jar.sha1 deleted file mode 100644 index e7ac44089c006..0000000000000 --- a/server/licenses/lucene-memory-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffe090339540876b40df792aee51a42af6b3f37f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.1.jar.sha1 
b/server/licenses/lucene-memory-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..55de1c9322aa3 --- /dev/null +++ b/server/licenses/lucene-memory-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e80eecfb1dcc324140387c8357c81e12c2a01937 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.0.jar.sha1 b/server/licenses/lucene-misc-9.12.0.jar.sha1 deleted file mode 100644 index afb546be4e032..0000000000000 --- a/server/licenses/lucene-misc-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad17704ee90eb926b6d3105f7027485cdadbecd9 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.1.jar.sha1 b/server/licenses/lucene-misc-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..86982eb1c900c --- /dev/null +++ b/server/licenses/lucene-misc-9.12.1.jar.sha1 @@ -0,0 +1 @@ +4e65d01d1c23f3f49dc325d552701bbefafee7ee \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.0.jar.sha1 b/server/licenses/lucene-queries-9.12.0.jar.sha1 deleted file mode 100644 index e24756e38dad2..0000000000000 --- a/server/licenses/lucene-queries-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ac2a62b0b55c5725bb65f0c5454f9f8a401cf43 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.1.jar.sha1 b/server/licenses/lucene-queries-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..f2087ec8eb623 --- /dev/null +++ b/server/licenses/lucene-queries-9.12.1.jar.sha1 @@ -0,0 +1 @@ +14f24315041b686683dba4bc679ca7dc6a505906 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.0.jar.sha1 b/server/licenses/lucene-queryparser-9.12.0.jar.sha1 deleted file mode 100644 index e93e00a063dd0..0000000000000 --- a/server/licenses/lucene-queryparser-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55959399373876f4c184944315458dc6b88fbd81 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.1.jar.sha1 b/server/licenses/lucene-queryparser-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..489e6719da342 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.12.1.jar.sha1 @@ -0,0 +1 @@ +aa6df09a99f8881d843e9863aa1713dc9f3ed24f \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.0.jar.sha1 b/server/licenses/lucene-sandbox-9.12.0.jar.sha1 deleted file mode 100644 index a3fd8446e0dbc..0000000000000 --- a/server/licenses/lucene-sandbox-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f65882536d681c11a1cbc920e5679201101e3603 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.1.jar.sha1 b/server/licenses/lucene-sandbox-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..c1d613e23f1fe --- /dev/null +++ b/server/licenses/lucene-sandbox-9.12.1.jar.sha1 @@ -0,0 +1 @@ +1a66485629d60779f039fc26360f4374ef1496e7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 deleted file mode 100644 index b0f11fb667faf..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d00cc7cc2279822ef6740f0677cafacfb439fa8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..c38b794ce9948 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 @@ -0,0 +1 @@ +0a7379410eff21676472adc8ea76a57891ec83c2 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 
b/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 deleted file mode 100644 index 858eee25ac191..0000000000000 --- a/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3092632ca1d4427d3ebb2c866ac89d90f5b61ec \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..bc327a8cec830 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d2fdea4edabb1f616f494999651c43abfd0aa124 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.0.jar.sha1 b/server/licenses/lucene-suggest-9.12.0.jar.sha1 deleted file mode 100644 index 973a7726d845d..0000000000000 --- a/server/licenses/lucene-suggest-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1c6636499317ebe498f3490a1ec8b86b8a363dd \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.1.jar.sha1 b/server/licenses/lucene-suggest-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..961f6da619149 --- /dev/null +++ b/server/licenses/lucene-suggest-9.12.1.jar.sha1 @@ -0,0 +1 @@ +0660e0996ec7653fe0c13c608137e264645eecac \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java index 32d5b3db85629..a7cb4847b45e5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java @@ -8,9 +8,15 @@ package org.opensearch.action.admin.cluster.shards; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.datastream.DataStreamTestCase; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.pagination.PageParams; +import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; @@ -20,15 +26,19 @@ import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.search.SearchService.NO_TIMEOUT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class TransportCatShardsActionIT extends OpenSearchIntegTestCase { +public class TransportCatShardsActionIT extends DataStreamTestCase { public void testCatShardsWithSuccessResponse() throws InterruptedException { internalCluster().startClusterManagerOnlyNodes(1); @@ -125,4 +135,334 @@ public void onFailure(Exception e) { latch.await(); } + public void testListShardsWithHiddenIndex() 
throws Exception { + final int numShards = 1; + final int numReplicas = 1; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(2); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + ensureGreen(); + + // Verify result for a default query: "_list/shards" + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, 100); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 2, true); + + // Verify result when the hidden index is explicitly queried: "_list/shards/test-hidden-idx" + listShardsRequest = getListShardsTransportRequest(new String[] { "test-hidden-idx" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 2, true); + + // Verify result when the hidden index is queried with a wildcard: "_list/shards/test-hidden-idx*" + // Since the ClusterStateAction underneath is invoked with lenientExpandOpen IndicesOptions, + // wildcards for hidden indices should not get resolved. + listShardsRequest = getListShardsTransportRequest(new String[] { "test-hidden-idx*" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 0, false); + } + + public void testListShardsWithClosedIndex() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(2); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + ensureGreen(); + + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verify result for a default query: "_list/shards" + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, 100); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 2, false); + + // Verify result when the closed index is explicitly queried: "_list/shards/test-closed-idx" + listShardsRequest = getListShardsTransportRequest(new String[] { "test-closed-idx" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 2, false); + + // Verify result when the closed index is queried with a wildcard: "_list/shards/test-closed-idx*" + // Since the ClusterStateAction underneath is invoked with lenientExpandOpen IndicesOptions, + // wildcards for closed indices should not get resolved.
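+ // Hence the request below should return neither shard routing entries nor shard stats for the closed index.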
+ listShardsRequest = getListShardsTransportRequest(new String[] { "test-closed-idx*" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 0, false); + } + + public void testListShardsWithClosedAndHiddenIndices() throws InterruptedException, ExecutionException { + final int numIndices = 4; + final int numShards = 1; + final int numReplicas = 2; + final int pageSize = 100; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verifying response for default queries: /_list/shards + // all the shards should be part of the response; however, stats should not be displayed for the closed index + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, pageSize); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx"))); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-hidden-idx"))); + assertEquals(numIndices * numShards * (numReplicas + 1), listShardsResponse.get().getResponseShards().size()); + assertFalse( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .anyMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-closed-idx")) + ); + assertEquals( + (numIndices - 1) * numShards * (numReplicas + 1), + listShardsResponse.get().getIndicesStatsResponse().getShards().length + ); + + // Verifying responses when hidden indices are explicitly queried: /_list/shards/test-hidden-idx + // Shards for the hidden index should appear in the response along with stats + listShardsRequest.setIndices(List.of("test-hidden-idx").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().allMatch(shard -> shard.getIndexName().equals("test-hidden-idx"))); + assertTrue( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-hidden-idx")) + ); + assertEquals( + listShardsResponse.get().getResponseShards().size(), + listShardsResponse.get().getIndicesStatsResponse().getShards().length + ); + + // Verifying responses when hidden indices are queried with wildcards: /_list/shards/test-hidden-idx* + //
Shards for the hidden index should not appear in the response or its stats. + listShardsRequest.setIndices(List.of("test-hidden-idx*").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + + // Explicitly querying for the closed index: /_list/shards/test-closed-idx + // should output closed shards without stats. + listShardsRequest.setIndices(List.of("test-closed-idx").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx"))); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + + // Querying for the closed index with wildcards: /_list/shards/test-closed-idx* + // should not output any closed shards. + listShardsRequest.setIndices(List.of("test-closed-idx*").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + } + + public void testListShardsWithClosedIndicesAcrossPages() throws InterruptedException, ExecutionException { + final int numIndices = 4; + final int numShards = 1; + final int numReplicas = 2; + final int pageSize = numShards * (numReplicas + 1); + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test-open-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-open-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + // close index "test-closed-idx-1" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx-1")).get(); + ensureGreen(); + // close index "test-closed-idx-2" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx-2")).get(); + ensureGreen(); + + // Verifying response for default queries: /_list/shards + List<ShardRouting> responseShardRouting = new ArrayList<>(); + List<ShardStats> responseShardStats = new ArrayList<>(); + String nextToken = null; + CatShardsRequest listShardsRequest; + ActionFuture<CatShardsResponse> listShardsResponse; + do { + listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, nextToken, pageSize); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + nextToken = listShardsResponse.get().getPageToken().getNextToken(); + responseShardRouting.addAll(listShardsResponse.get().getResponseShards()); + responseShardStats.addAll(List.of(listShardsResponse.get().getIndicesStatsResponse().getShards())); + } while (nextToken !=
null); + + assertTrue(responseShardRouting.stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx-1"))); + assertTrue(responseShardRouting.stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx-2"))); + assertEquals(numIndices * numShards * (numReplicas + 1), responseShardRouting.size()); + // ShardStats should only appear for the 2 open indices + assertFalse( + responseShardStats.stream().anyMatch(shardStats -> shardStats.getShardRouting().getIndexName().contains("test-closed-idx")) + ); + assertEquals(2 * numShards * (numReplicas + 1), responseShardStats.size()); + } + + public void testListShardsWithDataStream() throws Exception { + final int numDataNodes = 3; + String dataStreamName = "logs-test"; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(numDataNodes); + // Create an index template for data streams. + createDataStreamIndexTemplate("data-stream-template", List.of("logs-*")); + // Create data streams matching the "logs-*" index pattern. + createDataStream(dataStreamName); + ensureGreen(); + // Verifying the default query's result. The data stream should have created a hidden backing index in the + // background and all the corresponding shards should appear in the response along with stats. + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, numDataNodes * numDataNodes); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), dataStreamName, numDataNodes + 1, true); + // Verifying the result when the data stream is directly queried. Again, all the shards with stats should appear + listShardsRequest = getListShardsTransportRequest(new String[] { dataStreamName }, numDataNodes * numDataNodes); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), dataStreamName, numDataNodes + 1, true); + } + + public void testListShardsWithAliases() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + final String aliasName = "test-alias"; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + ensureGreen(); + + // Point test alias to both the indices (one being hidden while the other is closed) + final IndicesAliasesRequest request = new IndicesAliasesRequest().origin("allowed"); + request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test-closed-idx").alias(aliasName)); + assertAcked(client().admin().indices().aliases(request).actionGet()); + + request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test-hidden-idx").alias(aliasName)); + assertAcked(client().admin().indices().aliases(request).actionGet()); + + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verifying result when an alias is explicitly queried.
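+ // The alias expands to both of its backing indices, so shards for the hidden and the closed index are returned, + // while shard stats are only collected for the open (hidden) index.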
+ CatShardsRequest listShardsRequest = getListShardsTransportRequest(new String[] { aliasName }, 100); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue( + listShardsResponse.get() + .getResponseShards() + .stream() + .allMatch(shard -> shard.getIndexName().equals("test-hidden-idx") || shard.getIndexName().equals("test-closed-idx")) + ); + assertTrue( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-hidden-idx")) + ); + assertEquals(4, listShardsResponse.get().getResponseShards().size()); + assertEquals(2, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + } + + private void assertSingleIndexResponseShards( + CatShardsResponse catShardsResponse, + String indexNamePattern, + final int totalNumShards, + boolean shardStatsExist + ) { + assertTrue(catShardsResponse.getResponseShards().stream().allMatch(shard -> shard.getIndexName().contains(indexNamePattern))); + assertEquals(totalNumShards, catShardsResponse.getResponseShards().size()); + if (shardStatsExist) { + assertTrue( + Arrays.stream(catShardsResponse.getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().contains(indexNamePattern)) + ); + } + assertEquals(shardStatsExist ? totalNumShards : 0, catShardsResponse.getIndicesStatsResponse().getShards().length); + } + + private CatShardsRequest getListShardsTransportRequest(String[] indices, final int pageSize) { + return getListShardsTransportRequest(indices, null, pageSize); + } + + private CatShardsRequest getListShardsTransportRequest(String[] indices, String nextToken, final int pageSize) { + CatShardsRequest listShardsRequest = new CatShardsRequest(); + listShardsRequest.setCancelAfterTimeInterval(NO_TIMEOUT); + listShardsRequest.setIndices(indices); + listShardsRequest.setPageParams(new PageParams(nextToken, PageParams.PARAM_ASC_SORT_VALUE, pageSize)); + return listShardsRequest; + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java index 5f65d6647f26d..df2620b794686 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java @@ -14,6 +14,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.List; @@ -23,7 +24,7 @@ import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaFilteringAllocationIT extends OpenSearchIntegTestCase { +public class SearchReplicaFilteringAllocationIT extends RemoteStoreBaseIntegTestCase { @Override protected Settings featureFlagSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index 70124c8c46700..377f99cd8b791 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -33,12 +33,21 @@ package org.opensearch.discovery; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.coordination.JoinHelper; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PublicationTransportHandler; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Randomness; import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; @@ -46,10 +55,15 @@ import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.junit.Assert; + import java.util.Arrays; import java.util.HashSet; +import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; @@ -250,4 +264,142 @@ public void testNodeNotReachableFromClusterManager() throws Exception { ensureStableCluster(3); } + /** + * Tests the scenario wherein a cluster-state containing new repository metadata as part of a + * node-join from a repository-configured node fails at the commit stage and is followed by a cluster-manager switch. This would lead to the + * cluster-manager nodes doing another round of node-joins with the new cluster-state, as the previous attempt had a successful publish.
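+ * The test verifies that the node-join retry after the new election succeeds, and that both repositories eventually + * appear in the cluster metadata as well as in the RepositoriesService.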
+ */ + public void testElectClusterManagerRemotePublicationConfigurationNodeJoinCommitFails() throws Exception { + final String remoteStateRepoName = "remote-state-repo"; + final String remoteRoutingTableRepoName = "routing-table-repo"; + + Settings remotePublicationSettings = buildRemotePublicationNodeAttributes( + remoteStateRepoName, + ReloadableFsRepository.TYPE, + remoteRoutingTableRepoName, + ReloadableFsRepository.TYPE + ); + internalCluster().startClusterManagerOnlyNodes(3); + internalCluster().startDataOnlyNodes(3); + + String clusterManagerNode = internalCluster().getClusterManagerName(); + List<String> nonClusterManagerNodes = Arrays.stream(internalCluster().getNodeNames()) + .filter(node -> !node.equals(clusterManagerNode)) + .collect(Collectors.toList()); + + ensureStableCluster(6); + + MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + clusterManagerNode + ); + logger.info("Blocking Cluster Manager Commit Request on all nodes"); + // This is to allow the new node to have commit failures on the nodes in the send path itself. This will lead to the + // nodes having a successful publish operation but a failed commit operation. This will come into play once the new node joins + nonClusterManagerNodes.forEach(node -> { + TransportService targetTransportService = internalCluster().getInstance(TransportService.class, node); + clusterManagerTransportService.addSendBehavior(targetTransportService, (connection, requestId, action, request, options) -> { + if (action.equals(PublicationTransportHandler.COMMIT_STATE_ACTION_NAME)) { + logger.info("--> preventing {} request", PublicationTransportHandler.COMMIT_STATE_ACTION_NAME); + throw new FailedToCommitClusterStateException("Blocking Commit"); + } + connection.sendRequest(requestId, action, request, options); + }); + }); + + logger.info("Starting Node with remote publication settings"); + // Start a node with remote-publication repositories configured. This will lead to the active cluster-manager creating + // a new cluster-state event with the new node-join along with the new repositories set up in the cluster metadata.
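+ // The commit of that new state will fail on every non-cluster-manager node because of the blocked commit action installed above.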
+ internalCluster().startDataOnlyNodes(1, remotePublicationSettings, Boolean.TRUE); + + // Checking if publish succeeded in the nodes before shutting down the blocked cluster-manager + assertBusy(() -> { + String randomNode = nonClusterManagerNodes.get(Randomness.get().nextInt(nonClusterManagerNodes.size())); + PersistedStateRegistry registry = internalCluster().getInstance(PersistedStateRegistry.class, randomNode); + + ClusterState state = registry.getPersistedState(PersistedStateRegistry.PersistedStateType.LOCAL).getLastAcceptedState(); + RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + Boolean isRemoteStateRepoConfigured = Boolean.FALSE; + Boolean isRemoteRoutingTableRepoConfigured = Boolean.FALSE; + + assertNotNull(repositoriesMetadata); + assertNotNull(repositoriesMetadata.repositories()); + + for (RepositoryMetadata repo : repositoriesMetadata.repositories()) { + if (repo.name().equals(remoteStateRepoName)) { + isRemoteStateRepoConfigured = Boolean.TRUE; + } else if (repo.name().equals(remoteRoutingTableRepoName)) { + isRemoteRoutingTableRepoConfigured = Boolean.TRUE; + } + } + // Asserting that the metadata is present in the persisted cluster-state + assertTrue(isRemoteStateRepoConfigured); + assertTrue(isRemoteRoutingTableRepoConfigured); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, randomNode); + + isRemoteStateRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteStateRepoName); + isRemoteRoutingTableRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteRoutingTableRepoName); + + // Asserting that the metadata is not present in the repository service. + Assert.assertFalse(isRemoteStateRepoConfigured); + Assert.assertFalse(isRemoteRoutingTableRepoConfigured); + }); + + logger.info("Stopping current Cluster Manager"); + // We stop the current cluster-manager whose outbound paths were blocked. This is to force a new election onto the nodes + // where the new cluster-state was published but not committed. + internalCluster().stopCurrentClusterManagerNode(); + + // We expect the repository validations to be skipped in this case and the node-joins to succeed as expected. The + // repository validations are skipped because even though the cluster-state is updated in the persisted registry, + // the repository service will not be updated as the commit attempt failed. + ensureStableCluster(6); + + String randomNode = nonClusterManagerNodes.get(Randomness.get().nextInt(nonClusterManagerNodes.size())); + + // Checking if the final cluster-state is updated.
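+ // Both repositories should now be registered in the cluster metadata as well as in the RepositoriesService of the surviving nodes.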
+ RepositoriesMetadata repositoriesMetadata = internalCluster().getInstance(ClusterService.class, randomNode) + .state() + .metadata() + .custom(RepositoriesMetadata.TYPE); + + Boolean isRemoteStateRepoConfigured = Boolean.FALSE; + Boolean isRemoteRoutingTableRepoConfigured = Boolean.FALSE; + + for (RepositoryMetadata repo : repositoriesMetadata.repositories()) { + if (repo.name().equals(remoteStateRepoName)) { + isRemoteStateRepoConfigured = Boolean.TRUE; + } else if (repo.name().equals(remoteRoutingTableRepoName)) { + isRemoteRoutingTableRepoConfigured = Boolean.TRUE; + } + } + + Assert.assertTrue("RemoteState Repo is not set in RepositoriesMetadata", isRemoteStateRepoConfigured); + Assert.assertTrue("RemoteRoutingTable Repo is not set in RepositoriesMetadata", isRemoteRoutingTableRepoConfigured); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, randomNode); + + isRemoteStateRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteStateRepoName); + isRemoteRoutingTableRepoConfigured = isRepoPresentInRepositoryService(repositoriesService, remoteRoutingTableRepoName); + + Assert.assertTrue("RemoteState Repo is not set in RepositoryService", isRemoteStateRepoConfigured); + Assert.assertTrue("RemoteRoutingTable Repo is not set in RepositoryService", isRemoteRoutingTableRepoConfigured); + } + + private Boolean isRepoPresentInRepositoryService(RepositoriesService repositoriesService, String repoName) { + try { + Repository remoteStateRepo = repositoriesService.repository(repoName); + if (Objects.nonNull(remoteStateRepo)) { + return Boolean.TRUE; + } + } catch (RepositoryMissingException e) { + return Boolean.FALSE; + } + + return Boolean.FALSE; + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java new file mode 100644 index 0000000000000..7d4dd62cdca61 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java @@ -0,0 +1,325 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.indices.recovery.RecoveryRequest; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.nio.file.Path; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.routing.RecoverySource.Type.EMPTY_STORE; +import static org.opensearch.cluster.routing.RecoverySource.Type.EXISTING_STORE; +import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SearchReplicaReplicationAndRecoveryIT extends SegmentReplicationBaseIT { + + private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); + } + + @After + public void teardown() { + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); + + } + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); + } + + public void testReplication() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + final int docCount = 10; + for (int i = 0; i < 
docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(docCount, primary, replica); + } + + public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final List<String> nodes = internalCluster().startDataOnlyNodes(2); + createIndex( + INDEX_NAME, + Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .put("number_of_search_only_replicas", 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + ensureGreen(INDEX_NAME); + + final int docCount = 5; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(docCount, nodes); + + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats(INDEX_NAME) + .setDetailed(true) + .execute() + .actionGet(); + + // Verify the number of indices + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size()); + // Verify total shards + assertEquals(2, segmentReplicationStatsResponse.getTotalShards()); + // Verify the number of primary shards + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size()); + + SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0); + Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); + // Verify the number of replica stats + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats) { + assertNotNull(replicaStat.getCurrentReplicationState()); + } + } + + public void testSearchReplicaRecovery() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + final String replica = internalCluster().startDataOnlyNode(); + + // ensure search replicas are only allocated to "replica" node. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", replica)) + .execute() + .actionGet(); + + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + assertRecoverySourceType(replica, EMPTY_STORE); + + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + flush(INDEX_NAME); + waitForSearchableDocs(10, primary, replica); + + // Node stats should show remote download stats as nonzero, use this as a precondition to compare + // post restart.
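+ // After the restart below, the replica should recover from its existing store and therefore report no new downloads.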
+ assertDownloadStats(replica, true); + + internalCluster().restartNode(replica); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + // assert existing store recovery + assertRecoverySourceType(replica, EXISTING_STORE); + assertDownloadStats(replica, false); + } + + public void testRecoveryAfterDocsIndexed() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + assertRecoverySourceType(replica, EMPTY_STORE); + // replica should have downloaded from remote + assertDownloadStats(replica, true); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0)) + .get(); + + ensureGreen(INDEX_NAME); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)) + .get(); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + internalCluster().restartNode(replica); + + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + assertRecoverySourceType(replica, EXISTING_STORE); + assertDownloadStats(replica, false); + } + + private static void assertRecoverySourceType(String replica, RecoverySource.Type recoveryType) throws InterruptedException, + ExecutionException { + RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(INDEX_NAME)).get(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get(INDEX_NAME)) { + if (recoveryState.getPrimary() == false) { + assertEquals("All SR should be of expected recovery type", recoveryType, recoveryState.getRecoverySource().getType()); + assertEquals("All SR should be on the specified node", replica, recoveryState.getTargetNode().getName()); + } + } + } + + private static void assertDownloadStats(String replica, boolean expectBytesDownloaded) throws InterruptedException, ExecutionException { + NodesStatsResponse nodesStatsResponse = client().admin().cluster().nodesStats(new NodesStatsRequest(replica)).get(); + assertEquals(1, nodesStatsResponse.getNodes().size()); + NodeStats nodeStats = nodesStatsResponse.getNodes().get(0); + assertEquals(replica, nodeStats.getNode().getName()); + if (expectBytesDownloaded) { + assertTrue(nodeStats.getIndices().getSegments().getRemoteSegmentStats().getDownloadBytesStarted() > 0); + } else { + assertEquals(0, nodeStats.getIndices().getSegments().getRemoteSegmentStats().getDownloadBytesStarted()); + } + } + + public void testStopPrimary_RestoreOnNewNode() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + assertDocCounts(docCount, primary); + + final String replica =
internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + assertDocCounts(docCount, replica); + // stop the primary + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + + assertBusy(() -> { + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); + assertEquals(ClusterHealthStatus.RED, clusterHealthResponse.getStatus()); + }); + assertDocCounts(docCount, replica); + + String restoredPrimary = internalCluster().startDataOnlyNode(); + + client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture()); + ensureGreen(INDEX_NAME); + assertDocCounts(docCount, replica, restoredPrimary); + + for (int i = docCount; i < docCount * 2; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + assertBusy(() -> assertDocCounts(20, replica, restoredPrimary)); + } + + public void testFailoverToNewPrimaryWithPollingReplication() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)) + .get(); + final String writerReplica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + // stop the primary + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + + assertBusy(() -> { + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); + assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus()); + }); + assertDocCounts(10, replica); + + for (int i = docCount; i < docCount * 2; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + assertBusy(() -> assertDocCounts(20, replica, writerReplica)); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java deleted file mode 100644 index a1b512c326ac5..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.indices.replication; - -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; -import org.junit.Before; - -import java.nio.file.Path; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaReplicationIT extends SegmentReplicationBaseIT { - - private static final String REPOSITORY_NAME = "test-remote-store-repo"; - protected Path absolutePath; - - private Boolean useRemoteStore; - - @Before - public void randomizeRemoteStoreEnabled() { - useRemoteStore = randomBoolean(); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - if (useRemoteStore) { - if (absolutePath == null) { - absolutePath = randomRepoPath().toAbsolutePath(); - } - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) - .build(); - } - return super.nodeSettings(nodeOrdinal); - } - - @After - public void teardown() { - if (useRemoteStore) { - clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); - } - } - - @Override - public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build(); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); - } - - public void testReplication() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String primary = internalCluster().startDataOnlyNode(); - createIndex(INDEX_NAME); - ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataOnlyNode(); - ensureGreen(INDEX_NAME); - - final int docCount = 10; - for (int i = 0; i < docCount; i++) { - client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); - } - refresh(INDEX_NAME); - waitForSearchableDocs(docCount, primary, replica); - } - -} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java index 352332b962c92..e8d65e07c7dd9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -15,7 +15,7 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.remotestore.RemoteSnapshotIT; import org.opensearch.snapshots.SnapshotRestoreException; import org.opensearch.test.OpenSearchIntegTestCase; @@ -26,7 +26,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaRestoreIT extends AbstractSnapshotIntegTestCase { +public class SearchReplicaRestoreIT extends 
RemoteSnapshotIT { private static final String INDEX_NAME = "test-idx-1"; private static final String RESTORED_INDEX_NAME = INDEX_NAME + "-restored"; @@ -40,49 +40,6 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); } - public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnDocRepWithSearchReplica() throws Exception { - bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT); - createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); - - SnapshotRestoreException exception = expectThrows( - SnapshotRestoreException.class, - () -> restoreSnapshot( - REPOSITORY_NAME, - SNAPSHOT_NAME, - INDEX_NAME, - RESTORED_INDEX_NAME, - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build() - ) - ); - assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.DOCUMENT, ReplicationType.DOCUMENT))); - } - - public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnSegRepWithSearchReplica() throws Exception { - bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT); - createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); - - restoreSnapshot( - REPOSITORY_NAME, - SNAPSHOT_NAME, - INDEX_NAME, - RESTORED_INDEX_NAME, - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build() - ); - ensureYellowAndNoInitializingShards(RESTORED_INDEX_NAME); - internalCluster().startDataOnlyNode(); - ensureGreen(RESTORED_INDEX_NAME); - assertEquals(1, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); - - SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHitCount(resp, DOC_COUNT); - } - public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnDocRepWithSearchReplica() throws Exception { bootstrapIndexWithOutSearchReplicas(ReplicationType.SEGMENT); createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); @@ -140,27 +97,6 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_Resto assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.SEGMENT, ReplicationType.DOCUMENT))); } - public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRepWithNoSearchReplica() throws Exception { - bootstrapIndexWithSearchReplicas(); - createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); - - restoreSnapshot( - REPOSITORY_NAME, - SNAPSHOT_NAME, - INDEX_NAME, - RESTORED_INDEX_NAME, - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0) - .build() - ); - ensureGreen(RESTORED_INDEX_NAME); - assertEquals(0, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); - - SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHitCount(resp, DOC_COUNT); - } - private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType) throws InterruptedException { startCluster(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java 
index fa836e2cc5784..f524f4d1298c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -20,6 +20,7 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -31,7 +32,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchOnlyReplicaIT extends OpenSearchIntegTestCase { +public class SearchOnlyReplicaIT extends RemoteStoreBaseIntegTestCase { private static final String TEST_INDEX = "test_index"; @@ -55,35 +56,6 @@ public Settings indexSettings() { .build(); } - public void testCreateDocRepFails() { - Settings settings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); - - IllegalArgumentException illegalArgumentException = expectThrows( - IllegalArgumentException.class, - () -> createIndex(TEST_INDEX, settings) - ); - assertEquals(expectedFailureMessage, illegalArgumentException.getMessage()); - } - - public void testUpdateDocRepFails() { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .build(); - // create succeeds - createIndex(TEST_INDEX, settings); - - // update fails - IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> { - client().admin() - .indices() - .prepareUpdateSettings(TEST_INDEX) - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)) - .get(); - }); - assertEquals(expectedFailureMessage, illegalArgumentException.getMessage()); - } - public void testFailoverWithSearchReplica_WithWriterReplicas() throws IOException { int numSearchReplicas = 1; int numWriterReplicas = 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index ebb911c739eb3..1c4585e38ee90 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -39,6 +39,9 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -1078,4 +1081,79 @@ public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws Interrup Thread.sleep(10000); ensureGreen(INDEX_NAME); } + + public void testSuccessfulShallowV1SnapshotPostIndexClose() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); 
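// The assertions in the rest of this test hinge on one repository-level flag: with
// remote_store_index_shallow_copy set to true, an "fs" repository records only shard metadata and
// references to segments already uploaded to the remote store, so snapshots of remote-store-backed
// indices stay shallow. An equivalent repository definition at the REST layer would look roughly
// like this (repository name and location are illustrative, not taken from the test):
//
//   PUT _snapshot/shallow-snapshot-repo-name
//   { "type": "fs", "settings": { "location": "<repo-path>", "remote_store_index_shallow_copy": true } }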
+ + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms")); + + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + logger.info("Create shallow snapshot setting enabled repo"); + String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + Path shallowSnapshotRepoPath = randomRepoPath(); + Settings.Builder settings = Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + createRepository(shallowSnapshotRepoName, "fs", settings); + + for (int i = 0; i < 10; i++) { + indexBulk(INDEX_NAME, 1); + } + flushAndRefresh(INDEX_NAME); + + logger.info("Verify shallow snapshot created before close"); + final String snapshot1 = "snapshot1"; + SnapshotInfo snapshotInfo1 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot1) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo1.state()); + assertTrue(snapshotInfo1.successfulShards() > 0); + assertEquals(0, snapshotInfo1.failedShards()); + + for (int i = 0; i < 10; i++) { + indexBulk(INDEX_NAME, 1); + } + + // close index + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(1000); + logger.info("Verify shallow snapshot created after close"); + final String snapshot2 = "snapshot2"; + + SnapshotInfo snapshotInfo2 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot2) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo2.state()); + assertTrue(snapshotInfo2.successfulShards() > 0); + assertEquals(0, snapshotInfo2.failedShards()); + + // delete the index + cluster().wipeIndices(INDEX_NAME); + // try restoring the snapshot + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(shallowSnapshotRepoName, snapshot2) + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(INDEX_NAME); + flushAndRefresh(INDEX_NAME); + assertBusy(() -> { assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), 20); }); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index ef7da395d2151..79caef1f45a26 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -82,8 +82,7 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testSimpleTimeout() throws Exception { - final int numDocs = 1000; - for (int i = 0; i < numDocs; i++) { + for (int i = 0; i < 32; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 40c9301ef4bce..d200b9177353a 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -314,7 +314,7 @@ public void testSearchCancellationWithBackpressureDisabled() throws InterruptedE assertNull("SearchShardTask shouldn't have cancelled for monitor_only mode", caughtException); } - private static class ExceptionCatchingListener implements ActionListener { + public static class ExceptionCatchingListener implements ActionListener { private final CountDownLatch latch; private Exception exception = null; @@ -333,7 +333,11 @@ public void onFailure(Exception e) { latch.countDown(); } - private Exception getException() { + public CountDownLatch getLatch() { + return latch; + } + + public Exception getException() { return exception; } } @@ -349,7 +353,7 @@ private Supplier descriptionSupplier(String description) { return () -> description; } - interface TaskFactory { + public interface TaskFactory { T createTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index fdb12639c65be..cc837019d0b42 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -49,6 +49,7 @@ import org.opensearch.common.Numbers; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -63,6 +64,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; @@ -82,7 +84,9 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import java.util.function.Supplier; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; @@ -2609,4 +2613,99 @@ public void testSimpleSortsPoints() throws Exception { assertThat(searchResponse.toString(), not(containsString("error"))); } + + public void testSortMixedIntegerNumericFields() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + AtomicInteger counter = new AtomicInteger(); + index("long", () -> Long.MAX_VALUE - counter.getAndIncrement()); + index("integer", () -> Integer.MAX_VALUE - counter.getAndIncrement()); + SearchResponse searchResponse = client().prepareSearch("long", "integer") + .setQuery(matchAllQuery()) + .setSize(10) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC).sortMode(SortMode.MAX)) + .get(); + assertNoFailures(searchResponse); + long[] sortValues = new long[10]; + for (int i = 0; i < 10; i++) { + sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(); + } + for (int i = 1; i < 10; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], 
lessThan(sortValues[i])); + } + } + + public void testSortMixedFloatingNumericFields() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + AtomicInteger counter = new AtomicInteger(); + index("double", () -> 100.5 - counter.getAndIncrement()); + counter.set(0); + index("float", () -> 200.5 - counter.getAndIncrement()); + counter.set(0); + index("half_float", () -> 300.5 - counter.getAndIncrement()); + SearchResponse searchResponse = client().prepareSearch("double", "float", "half_float") + .setQuery(matchAllQuery()) + .setSize(15) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC).sortMode(SortMode.MAX)) + .get(); + assertNoFailures(searchResponse); + double[] sortValues = new double[15]; + for (int i = 0; i < 15; i++) { + sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(); + } + for (int i = 1; i < 15; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThan(sortValues[i])); + } + } + + public void testSortMixedFloatingAndIntegerNumericFields() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + index("long", () -> randomLongBetween(0, (1L << 53) - 1)); /* 2^53 - 1 is the largest integer a double represents exactly; the previous (long) 2E53 saturated to Long.MAX_VALUE and defeated the bound */ + index("integer", OpenSearchTestCase::randomInt); + index("double", OpenSearchTestCase::randomDouble); + index("float", () -> randomFloat()); + boolean asc = randomBoolean(); + SearchResponse searchResponse = client().prepareSearch("long", "integer", "double", "float") + .setQuery(matchAllQuery()) + .setSize(20) + .addSort(SortBuilders.fieldSort("field").order(asc ? SortOrder.ASC : SortOrder.DESC).sortMode(SortMode.MAX)) + .get(); + assertNoFailures(searchResponse); + double[] sortValues = new double[20]; + for (int i = 0; i < 20; i++) { + sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(); + } + if (asc) { + for (int i = 1; i < 20; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThanOrEqualTo(sortValues[i])); + } + } else { + for (int i = 1; i < 20; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], greaterThanOrEqualTo(sortValues[i])); + } + } + } + + private void index(String type, Supplier<Number> valueSupplier) throws Exception { + assertAcked( + prepareCreate(type).setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field") + .field("type", type) + .endObject() + .endObject() + .endObject() + ).setSettings(Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 0)) + ); + ensureGreen(type); + for (int i = 0; i < 5; i++) { + client().prepareIndex(type) + .setId(Integer.toString(i)) + .setSource("{\"field\" : " + valueSupplier.get() + " }", XContentType.JSON) + .get(); + } + client().admin().indices().prepareRefresh(type).get(); + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java new file mode 100644 index 0000000000000..6b68a83da94e2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java @@ -0,0 +1,434 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.wlm; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchTask; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.backpressure.SearchBackpressureIT.ExceptionCatchingListener; +import org.opensearch.search.backpressure.SearchBackpressureIT.TaskFactory; +import org.opensearch.search.backpressure.SearchBackpressureIT.TestResponse; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.threadpool.ThreadPool.Names.SAME; +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.hamcrest.Matchers.instanceOf; + +public class WorkloadManagementIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + final static String PUT = "PUT"; + final static String MEMORY = "MEMORY"; + final static String CPU = "CPU"; + final static String ENABLED = "enabled"; + final static String DELETE = "DELETE"; + private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.SECONDS); + + public WorkloadManagementIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Collection> 
nodePlugins() { + final List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestClusterUpdatePlugin.class); + return plugins; + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder() + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD.getKey(), 0.9) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.getKey(), 0.9) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testHighCPUInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighCPUInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, 
queryGroup.get_id()); + assertNull("SearchTask should not have been cancelled in monitor mode", caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testNoCancellation() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.8, ResourceType.MEMORY, 0.8) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public Exception executeQueryGroupTask(String resourceType, String queryGroupId) throws InterruptedException { + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute( + TestQueryGroupTaskTransportAction.ACTION, + new TestQueryGroupTaskRequest( + resourceType, + queryGroupId, + (TaskFactory<SearchTask>) (id, type, action, description, parentTaskId, headers) -> new SearchTask( + id, + type, + action, + () -> description, + parentTaskId, + headers + ) + ), + listener + ); + assertTrue(listener.getLatch().await(TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS)); + return listener.getException(); + } + + public void updateQueryGroupInClusterState(String method, QueryGroup queryGroup) throws InterruptedException { + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(queryGroup, method), listener); + assertTrue(listener.getLatch().await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + assertEquals(0, listener.getLatch().getCount()); + } + + public static class TestClusterUpdateRequest extends ClusterManagerNodeRequest<TestClusterUpdateRequest> { + final private String method; + final private QueryGroup queryGroup; + + public TestClusterUpdateRequest(QueryGroup queryGroup, String method) { + this.method = method; + this.queryGroup = queryGroup; + } + + public TestClusterUpdateRequest(StreamInput in) throws IOException { + super(in); + this.method = in.readString(); + this.queryGroup = new QueryGroup(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(method); + queryGroup.writeTo(out); + } + + public QueryGroup getQueryGroup() { + return queryGroup; + } + + public String getMethod() { + return method; + } + } + + public static class TestClusterUpdateTransportAction extends TransportClusterManagerNodeAction<TestClusterUpdateRequest, TestResponse> { + public static final ActionType<TestResponse> ACTION = new ActionType<>("internal::test_cluster_update_action", TestResponse::new); + + @Inject + public TestClusterUpdateTransportAction( + ThreadPool threadPool, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService + ) { + super( + ACTION.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TestClusterUpdateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return SAME; + } + + @Override + protected TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(TestClusterUpdateRequest request, ClusterState state) { + return
state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + TestClusterUpdateRequest request, + ClusterState clusterState, + ActionListener listener + ) { + clusterService.submitStateUpdateTask("query-group-persistence-service", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + Map currentGroups = currentState.metadata().queryGroups(); + QueryGroup queryGroup = request.getQueryGroup(); + String id = queryGroup.get_id(); + String method = request.getMethod(); + Metadata metadata; + if (method.equals(PUT)) { // create + metadata = Metadata.builder(currentState.metadata()).put(queryGroup).build(); + } else { // delete + metadata = Metadata.builder(currentState.metadata()).remove(currentGroups.get(id)).build(); + } + return ClusterState.builder(currentState).metadata(metadata).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new TestResponse()); + } + }); + } + } + + public static class TestQueryGroupTaskRequest extends ActionRequest { + private final String type; + private final String queryGroupId; + private TaskFactory taskFactory; + + public TestQueryGroupTaskRequest(String type, String queryGroupId, TaskFactory taskFactory) { + this.type = type; + this.queryGroupId = queryGroupId; + this.taskFactory = taskFactory; + } + + public TestQueryGroupTaskRequest(StreamInput in) throws IOException { + super(in); + this.type = in.readString(); + this.queryGroupId = in.readString(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return taskFactory.createTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(type); + out.writeString(queryGroupId); + } + + public String getType() { + return type; + } + + public String getQueryGroupId() { + return queryGroupId; + } + } + + public static class TestQueryGroupTaskTransportAction extends HandledTransportAction { + public static final ActionType ACTION = new ActionType<>("internal::test_query_group_task_action", TestResponse::new); + private final ThreadPool threadPool; + + @Inject + public TestQueryGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestQueryGroupTaskRequest::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionListener listener) { + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, request.getQueryGroupId()); + threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + CancellableTask cancellableTask = (CancellableTask) task; + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + assertEquals(request.getQueryGroupId(), ((QueryGroupTask) task).getQueryGroupId()); + long startTime = System.nanoTime(); + while (System.nanoTime() - startTime < TIMEOUT.getNanos()) { + doWork(request); + if (cancellableTask.isCancelled()) { + break; + } + } + if (cancellableTask.isCancelled()) { + throw new 
TaskCancelledException(cancellableTask.getReasonCancelled()); + } else { + listener.onResponse(new TestResponse()); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void doWork(TestQueryGroupTaskRequest request) throws InterruptedException { + switch (request.getType()) { + case "CPU": + // Busy-spin on integer arithmetic so the task accrues CPU time until it is cancelled or the outer timeout elapses. + long i = 0, j = 1, k = 1, iterations = 1000; + do { + j += i; + k *= j; + i++; + } while (i < iterations); + break; + case "MEMORY": + // Allocate roughly 1% of the heap so the task's tracked memory usage crosses the low query-group threshold; + // the arrays are never read and become garbage as soon as this method returns. + int bytesToAllocate = (int) (Runtime.getRuntime().totalMemory() * 0.01); + Byte[] bytes = new Byte[bytesToAllocate]; + int[] ints = new int[bytesToAllocate]; + break; + } + } + } + + public static class TestClusterUpdatePlugin extends Plugin implements ActionPlugin { + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return Arrays.asList( + new ActionHandler<>(TestClusterUpdateTransportAction.ACTION, TestClusterUpdateTransportAction.class), + new ActionHandler<>(TestQueryGroupTaskTransportAction.ACTION, TestQueryGroupTaskTransportAction.class) + ); + } + + @Override + public List<ActionType<? extends ActionResponse>> getClientActions() { + return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestQueryGroupTaskTransportAction.ACTION); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 62e05ebb37e28..06bd4da1931de 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.shards; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; @@ -41,6 +42,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.search.slice.SliceBuilder; import java.io.IOException; import java.util.Objects; @@ -61,6 +63,8 @@ public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest strategyIndices) { + return strategyIndices.stream().filter(index -> { + IndexMetadata metadata = clusterState.metadata().indices().get(index); + return metadata != null && metadata.getState().equals(IndexMetadata.State.CLOSE) == false; + }).toArray(String[]::new); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 83e104236f640..11323499efd8b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -133,7 +133,7 @@ protected void clusterManagerOperation( Set<String> nodeIds = new HashSet<>(); GroupShardsIterator<ShardIterator> groupShardsIterator = clusterService.operationRouting() - .searchShards(clusterState, concreteIndices, routingMap, request.preference()); + .searchShards(clusterState, concreteIndices, routingMap, request.preference(), null, null, request.slice()); ShardRouting shard; ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; int currentGroup = 0; diff --git
a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java index fc97d67c6c3af..44408c5043fcf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java @@ -21,7 +21,6 @@ import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexService; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -38,7 +37,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Transport action for shard segment replication operation. This transport action does not actually @@ -96,11 +97,11 @@ protected SegmentReplicationStatsResponse newResponse( ) { String[] shards = request.shards(); final List shardsToFetch = Arrays.stream(shards).map(Integer::valueOf).collect(Collectors.toList()); - // organize replica responses by allocationId. final Map replicaStats = new HashMap<>(); // map of index name to list of replication group stats. final Map> primaryStats = new HashMap<>(); + for (SegmentReplicationShardStatsResponse response : responses) { if (response != null) { if (response.getReplicaStats() != null) { @@ -109,6 +110,7 @@ protected SegmentReplicationStatsResponse newResponse( replicaStats.putIfAbsent(shardRouting.allocationId().getId(), response.getReplicaStats()); } } + if (response.getPrimaryStats() != null) { final ShardId shardId = response.getPrimaryStats().getShardId(); if (shardsToFetch.isEmpty() || shardsToFetch.contains(shardId.getId())) { @@ -126,15 +128,20 @@ protected SegmentReplicationStatsResponse newResponse( } } } - // combine the replica stats to the shard stat entry in each group. 
- for (Map.Entry> entry : primaryStats.entrySet()) { - for (SegmentReplicationPerGroupStats group : entry.getValue()) { - for (SegmentReplicationShardStats replicaStat : group.getReplicaStats()) { - replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null)); - } - } - } - return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, primaryStats, shardFailures); + + Map> replicationStats = primaryStats.entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue() + .stream() + .map(groupStats -> updateGroupStats(groupStats, replicaStats)) + .collect(Collectors.toList()) + ) + ); + + return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, replicationStats, shardFailures); } @Override @@ -144,9 +151,8 @@ protected SegmentReplicationStatsRequest readRequestFrom(StreamInput in) throws @Override protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplicationStatsRequest request, ShardRouting shardRouting) { - IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); ShardId shardId = shardRouting.shardId(); + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() == false) { return null; @@ -156,11 +162,7 @@ protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplication return new SegmentReplicationShardStatsResponse(pressureService.getStatsForShard(indexShard)); } - // return information about only on-going segment replication events. - if (request.activeOnly()) { - return new SegmentReplicationShardStatsResponse(targetService.getOngoingEventSegmentReplicationState(shardId)); - } - return new SegmentReplicationShardStatsResponse(targetService.getSegmentReplicationState(shardId)); + return new SegmentReplicationShardStatsResponse(getSegmentReplicationState(shardId, request.activeOnly())); } @Override @@ -181,4 +183,83 @@ protected ClusterBlockException checkRequestBlock( ) { return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); } + + private SegmentReplicationPerGroupStats updateGroupStats( + SegmentReplicationPerGroupStats groupStats, + Map replicaStats + ) { + // Update the SegmentReplicationState for each of the replicas + Set updatedReplicaStats = groupStats.getReplicaStats() + .stream() + .peek(replicaStat -> replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null))) + .collect(Collectors.toSet()); + + // Compute search replica stats + Set searchReplicaStats = computeSearchReplicaStats(groupStats.getShardId(), replicaStats); + + // Combine ReplicaStats and SearchReplicaStats + Set combinedStats = Stream.concat(updatedReplicaStats.stream(), searchReplicaStats.stream()) + .collect(Collectors.toSet()); + + return new SegmentReplicationPerGroupStats(groupStats.getShardId(), combinedStats, groupStats.getRejectedRequestCount()); + } + + private Set computeSearchReplicaStats( + ShardId shardId, + Map replicaStats + ) { + return replicaStats.values() + .stream() + .filter(segmentReplicationState -> segmentReplicationState.getShardRouting().shardId().equals(shardId)) + .filter(segmentReplicationState -> segmentReplicationState.getShardRouting().isSearchOnly()) + .map(segmentReplicationState -> { + ShardRouting 
shardRouting = segmentReplicationState.getShardRouting(); + SegmentReplicationShardStats segmentReplicationStats = computeSegmentReplicationShardStats(shardRouting); + segmentReplicationStats.setCurrentReplicationState(segmentReplicationState); + return segmentReplicationStats; + }) + .collect(Collectors.toSet()); + } + + SegmentReplicationShardStats computeSegmentReplicationShardStats(ShardRouting shardRouting) { + ShardId shardId = shardRouting.shardId(); + SegmentReplicationState completedSegmentReplicationState = targetService.getlatestCompletedEventSegmentReplicationState(shardId); + SegmentReplicationState ongoingSegmentReplicationState = targetService.getOngoingEventSegmentReplicationState(shardId); + + return new SegmentReplicationShardStats( + shardRouting.allocationId().getId(), + 0, + calculateBytesRemainingToReplicate(ongoingSegmentReplicationState), + 0, + getCurrentReplicationLag(ongoingSegmentReplicationState), + getLastCompletedReplicationLag(completedSegmentReplicationState) + ); + } + + private SegmentReplicationState getSegmentReplicationState(ShardId shardId, boolean isActiveOnly) { + if (isActiveOnly) { + return targetService.getOngoingEventSegmentReplicationState(shardId); + } else { + return targetService.getSegmentReplicationState(shardId); + } + } + + private long calculateBytesRemainingToReplicate(SegmentReplicationState ongoingSegmentReplicationState) { + if (ongoingSegmentReplicationState == null) { + return 0; + } + return ongoingSegmentReplicationState.getIndex() + .fileDetails() + .stream() + .mapToLong(index -> index.length() - index.recovered()) + .sum(); + } + + private long getCurrentReplicationLag(SegmentReplicationState ongoingSegmentReplicationState) { + return ongoingSegmentReplicationState != null ? ongoingSegmentReplicationState.getTimer().time() : 0; + } + + private long getLastCompletedReplicationLag(SegmentReplicationState completedSegmentReplicationState) { + return completedSegmentReplicationState != null ? 
completedSegmentReplicationState.getTimer().time() : 0; + } } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 9ff8e14ac49d5..493c80757d668 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -537,6 +537,8 @@ protected void doRun() { } final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver); Metadata metadata = clusterState.metadata(); + // go over all the requests and create a ShardId -> Operations mapping + Map> requestsByShard = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); // the request can only be null because we set it to null in the previous step, so it gets ignored @@ -592,6 +594,12 @@ protected void doRun() { default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); } + + ShardId shardId = clusterService.operationRouting() + .indexShards(clusterState, concreteIndex.getName(), docWriteRequest.id(), docWriteRequest.routing()) + .shardId(); + List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); + shardRequests.add(new BulkItemRequest(i, docWriteRequest)); } catch (OpenSearchParseException | IllegalArgumentException | RoutingMissingException e) { BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.id(), e); BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); @@ -601,21 +609,6 @@ protected void doRun() { } } - // first, go over all the requests and create a ShardId -> Operations mapping - Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest request = bulkRequest.requests.get(i); - if (request == null) { - continue; - } - String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); - ShardId shardId = clusterService.operationRouting() - .indexShards(clusterState, concreteIndex, request.id(), request.routing()) - .shardId(); - List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); - shardRequests.add(new BulkItemRequest(i, request)); - } - if (requestsByShard.isEmpty()) { BulkItemResponse[] response = responses.toArray(new BulkItemResponse[responses.length()]); long tookMillis = buildTookInMillis(startTimeNanos); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index b303dfea05c0a..180b813604e75 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -248,8 +248,7 @@ private AsyncShardsAction(FieldCapabilitiesIndexRequest request, ActionListener< throw blockException; } - shardsIt = clusterService.operationRouting() - .searchShards(clusterService.state(), new String[] { request.index() }, null, null, null, null); + shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), new String[] { request.index() }, null, null); } public void start() { diff --git 
a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index 161a103cdf36a..d63695447e365 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -48,6 +48,7 @@ import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -604,36 +605,51 @@ private static void validateMergeSortValueFormats(Collection buildRewriteListener( ); } else { AtomicInteger skippedClusters = new AtomicInteger(0); + SliceBuilder slice = searchRequest.source() == null ? null : searchRequest.source().slice(); collectSearchShards( searchRequest.indicesOptions(), searchRequest.preference(), @@ -559,6 +561,7 @@ private ActionListener buildRewriteListener( remoteClusterIndices, remoteClusterService, threadPool, + slice, ActionListener.wrap(searchShardsResponses -> { final BiFunction clusterNodeLookup = getRemoteClusterNodeLookup( searchShardsResponses @@ -787,6 +790,7 @@ static void collectSearchShards( Map remoteIndicesByCluster, RemoteClusterService remoteClusterService, ThreadPool threadPool, + SliceBuilder slice, ActionListener> listener ) { final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); @@ -800,7 +804,8 @@ static void collectSearchShards( ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices).indicesOptions(indicesOptions) .local(true) .preference(preference) - .routing(routing); + .routing(routing) + .slice(slice); clusterClient.admin() .cluster() .searchShards( @@ -1042,6 +1047,7 @@ private void executeSearch( concreteLocalIndices[i] = indices[i].getName(); } Map nodeSearchCounts = searchTransportService.getPendingSearchRequests(); + SliceBuilder slice = searchRequest.source() == null ? 
null : searchRequest.source().slice(); GroupShardsIterator localShardRoutings = clusterService.operationRouting() .searchShards( clusterState, @@ -1049,7 +1055,8 @@ private void executeSearch( routingMap, searchRequest.preference(), searchService.getResponseCollectorService(), - nodeSearchCounts + nodeSearchCounts, + slice ); localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false) .map(it -> new SearchShardIterator(searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices)) diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 819e09312a0df..558b7370749d5 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -430,6 +430,13 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes if (remoteClusterStateService != null && termVersionResponse.isStatePresentInRemote()) { try { + logger.info( + () -> new ParameterizedMessage( + "Term version checker downloading full cluster state for term {}, version {}", + termVersion.getTerm(), + termVersion.getVersion() + ) + ); ClusterStateTermVersion clusterStateTermVersion = termVersionResponse.getClusterStateTermVersion(); Optional clusterMetadataManifest = remoteClusterStateService .getClusterMetadataManifestByTermVersion( @@ -454,7 +461,7 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes return clusterStateFromRemote; } } catch (Exception e) { - logger.trace("Error while fetching from remote cluster state", e); + logger.error("Error while fetching from remote cluster state", e); } } return null; diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 0e8559578b916..b01142788fb82 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -35,7 +35,9 @@ import org.opensearch.cli.Command; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; @@ -71,6 +73,9 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath; import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING; /** * Initializes SecurityManager with necessary permissions. 
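The Security.java hunk that follows grants listen permissions for auxiliary transport ports: addSocketPermissionForAux resolves each type named in AUX_TRANSPORT_TYPES_SETTING to its per-type port range and falls back to AUX_PORT_DEFAULTS when none is configured. A minimal sketch of the fallback path; the setting keys below are assumptions for illustration, not values verified against those constants:

    // Hypothetical node settings: one aux transport enabled, no per-type port range configured.
    Settings nodeSettings = Settings.builder()
        .putList("aux.transport.types", "experimental-transport-grpc") // key assumed from AUX_TRANSPORT_TYPES_SETTING
        .build();
    // addSocketPermissionForAux finds no concrete port setting for this type, so the policy grants
    // SocketPermission "listen,resolve" over the AUX_PORT_DEFAULTS range instead.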
@@ -402,6 +407,7 @@ static void addFilePermissions(Permissions policy, Environment environment) thro private static void addBindPermissions(Permissions policy, Settings settings) { addSocketPermissionForHttp(policy, settings); addSocketPermissionForTransportProfiles(policy, settings); + addSocketPermissionForAux(policy, settings); } /** @@ -416,6 +422,29 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S addSocketPermissionForPortRange(policy, httpRange); } + /** + * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORTS. + * If an auxiliary transport type is enabled but has no corresponding port range setting fall back to AUX_PORT_DEFAULTS. + * + * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to. + * @param settings the {@link Settings} instance to read the gRPC settings from + */ + private static void addSocketPermissionForAux(final Permissions policy, final Settings settings) { + Set portsRanges = new HashSet<>(); + for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) { + Setting auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType); + if (auxTypePortSettings.exists(settings)) { + portsRanges.add(auxTypePortSettings.get(settings)); + } else { + portsRanges.add(new PortsRange(AUX_PORT_DEFAULTS)); + } + } + + for (PortsRange portRange : portsRanges) { + addSocketPermissionForPortRange(policy, portRange.getPortRangeString()); + } + } + /** * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}. diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 7275d72f2db9f..4ad5b80038048 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -258,7 +258,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest } if (applyFullState == true) { - logger.debug( + logger.info( () -> new ParameterizedMessage( "Downloading full cluster state for term {}, version {}, stateUUID {}", manifest.getClusterTerm(), diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index a12ab34dfbc80..4f58ad6bf1d53 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1062,7 +1062,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * * @opensearch.internal */ - private static class IndexMetadataDiff implements Diff { + static class IndexMetadataDiff implements Diff { private final String index; private final int routingNumShards; @@ -1192,7 +1192,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.customMetadata.putAll(customData.apply(part.customData)); builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds)); builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); - builder.system(part.isSystem); + builder.system(isSystem); builder.context(context); return builder.build(); } diff --git 
a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 05588620348aa..b9eb2643b0fb5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1103,14 +1103,9 @@ static Settings aggregateIndexSettings( private static void updateSearchOnlyReplicas(Settings requestSettings, Settings.Builder builder) { if (INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.exists(builder) && builder.get(SETTING_NUMBER_OF_SEARCH_REPLICAS) != null) { if (INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings) > 0 - && ReplicationType.parseString(builder.get(INDEX_REPLICATION_TYPE_SETTING.getKey())).equals(ReplicationType.DOCUMENT)) { + && Boolean.parseBoolean(builder.get(SETTING_REMOTE_STORE_ENABLED)) == false) { throw new IllegalArgumentException( - "To set " - + SETTING_NUMBER_OF_SEARCH_REPLICAS - + ", " - + INDEX_REPLICATION_TYPE_SETTING.getKey() - + " must be set to " - + ReplicationType.SEGMENT + "To set " + SETTING_NUMBER_OF_SEARCH_REPLICAS + ", " + SETTING_REMOTE_STORE_ENABLED + " must be set to true" ); } builder.put(SETTING_NUMBER_OF_SEARCH_REPLICAS, INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 4f0c0f3aa2ab4..ccaa940a0ca9d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -63,7 +63,6 @@ import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -77,8 +76,8 @@ import java.util.Set; import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; @@ -539,14 +538,12 @@ public ClusterState execute(ClusterState currentState) { private void validateSearchReplicaCountSettings(Settings requestSettings, Index[] indices, ClusterState currentState) { final int updatedNumberOfSearchReplicas = IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings); if (updatedNumberOfSearchReplicas > 0) { - if (Arrays.stream(indices).allMatch(index -> currentState.metadata().isSegmentReplicationEnabled(index.getName())) == false) { + if (Arrays.stream(indices) + .allMatch( + index -> currentState.metadata().index(index.getName()).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) + ) == false) { throw new IllegalArgumentException( - "To set " - + 
SETTING_NUMBER_OF_SEARCH_REPLICAS - + ", " - + INDEX_REPLICATION_TYPE_SETTING.getKey() - + " must be set to " - + ReplicationType.SEGMENT + "To set " + SETTING_NUMBER_OF_SEARCH_REPLICAS + ", " + SETTING_REMOTE_STORE_ENABLED + " must be set to true" ); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index b46efad9207c1..3be63cd81b362 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -145,7 +145,10 @@ boolean validate(Metadata metadata) { "Shard [" + indexShardRoutingTable.shardId().id() + "] routing table has wrong number of replicas, expected [" + + "Replicas: " + indexMetadata.getNumberOfReplicas() + + "Search Replicas: " + + indexMetadata.getNumberOfSearchOnlyReplicas() + "], got [" + routingNumberOfReplicas + "]" @@ -513,15 +516,31 @@ public Builder initializeAsRemoteStoreRestore( ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) ); } + // if writers are red we do not want to re-recover search only shards if already assigned. + for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) { + if (shardRouting.unassigned()) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) + ); + } else { + indexShardRoutingBuilder.addShard(shardRouting); + } + } } else { // Primary is either active or initializing. Do not trigger restore. indexShardRoutingBuilder.addShard(indexShardRoutingTable.primaryShard()); // Replica, if unassigned, trigger peer recovery else no action. for (ShardRouting shardRouting : indexShardRoutingTable.replicaShards()) { if (shardRouting.unassigned()) { - indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) - ); + if (shardRouting.isSearchOnly()) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) + ); + } else { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) + ); + } } else { indexShardRoutingBuilder.addShard(shardRouting); } @@ -574,13 +593,7 @@ private Builder initializeAsRestore( } for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) { indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned( - shardId, - false, - true, - PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled - unassignedInfo - ) + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) ); } shards.put(shardNumber, indexShardRoutingBuilder.build()); @@ -623,13 +636,7 @@ private Builder initializeEmpty(IndexMetadata indexMetadata, UnassignedInfo unas } for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) { indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned( - shardId, - false, - true, - PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled - unassignedInfo - ) + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) ); } shards.put(shardNumber, indexShardRoutingBuilder.build()); @@ -664,7 +671,7 @@ public Builder addSearchReplica() { shardId, false, true, - PeerRecoverySource.INSTANCE, // TODO: Change to remote 
store if enabled + EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null) ); shards.put(shardNumber, new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()); diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index e4ed65683c5eb..cd53462843dbc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -135,6 +135,23 @@ public class IndexShardRoutingTable extends AbstractDiffable searchShards( @Nullable Map> routing, @Nullable String preference ) { - return searchShards(clusterState, concreteIndices, routing, preference, null, null); + return searchShards(clusterState, concreteIndices, routing, preference, null, null, null); } public GroupShardsIterator searchShards( @@ -240,10 +244,24 @@ public GroupShardsIterator searchShards( @Nullable String preference, @Nullable ResponseCollectorService collectorService, @Nullable Map nodeCounts + ) { + return searchShards(clusterState, concreteIndices, routing, preference, collectorService, nodeCounts, null); + } + + public GroupShardsIterator searchShards( + ClusterState clusterState, + String[] concreteIndices, + @Nullable Map> routing, + @Nullable String preference, + @Nullable ResponseCollectorService collectorService, + @Nullable Map nodeCounts, + @Nullable SliceBuilder slice ) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); - final Set set = new HashSet<>(shards.size()); + + Map> shardIterators = new HashMap<>(); for (IndexShardRoutingTable shard : shards) { + IndexMetadata indexMetadataForShard = indexMetadata(clusterState, shard.shardId.getIndex().getName()); if (indexMetadataForShard.isRemoteSnapshot() && (preference == null || preference.isEmpty())) { preference = Preference.PRIMARY.type(); @@ -274,10 +292,31 @@ public GroupShardsIterator searchShards( clusterState.metadata().weightedRoutingMetadata() ); if (iterator != null) { - set.add(iterator); + shardIterators.computeIfAbsent(iterator.shardId().getIndex(), k -> new ArrayList<>()).add(iterator); } } - return GroupShardsIterator.sortAndCreate(new ArrayList<>(set)); + List allShardIterators = new ArrayList<>(); + if (slice != null) { + for (List indexIterators : shardIterators.values()) { + // Filter the returned shards for the given slice + CollectionUtil.timSort(indexIterators); + // We use the ordinal of the iterator in the group (after sorting) rather than the shard id, because + // computeTargetedShards may return a subset of shards for an index, if a routing parameter was + // specified. In that case, the set of routable shards is considered the full universe of available + // shards for each index, when mapping shards to slices. If no routing parameter was specified, + // then ordinals and shard IDs are the same. This mimics the logic in + // org.opensearch.search.slice.SliceBuilder.toFilter. 
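+ // Illustrative example (assuming the usual slice-to-shard mapping, where a shard ordinal
+ // matches a slice when ordinal % max == id): with 4 routable iterators for an index and a
+ // slice of id=1, max=2, shardMatches(i, 4) keeps the iterators at ordinals 1 and 3.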
+ for (int i = 0; i < indexIterators.size(); i++) { + if (slice.shardMatches(i, indexIterators.size())) { + allShardIterators.add(indexIterators.get(i)); + } + } + } + } else { + shardIterators.values().forEach(allShardIterators::addAll); + } + + return GroupShardsIterator.sortAndCreate(allShardIterators); } public static ShardIterator getShards(ClusterState clusterState, ShardId shardId) { @@ -311,6 +350,7 @@ private Set computeTargetedShards( set.add(indexShard); } } + } return set; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index ada35caa1e61e..bdc98061f2fa4 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -115,7 +115,7 @@ protected ShardRouting( assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " + state; - assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary + assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary || searchOnly : "replica shards always recover from primary"; assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node " + this; @@ -156,7 +156,7 @@ private ShardRouting initializeTargetRelocatingShard() { primary, searchOnly, ShardRoutingState.INITIALIZING, - PeerRecoverySource.INSTANCE, + isSearchOnly() ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize @@ -440,7 +440,7 @@ public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) { assert state != ShardRoutingState.UNASSIGNED : this; final RecoverySource recoverySource; if (active()) { - if (primary()) { + if (primary() || isSearchOnly()) { recoverySource = ExistingStoreRecoverySource.INSTANCE; } else { recoverySource = PeerRecoverySource.INSTANCE; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index 113d5803c1d65..e673c1409a869 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -99,7 +99,11 @@ public void shardStarted(ShardRouting initializingShard, ShardRouting startedSha + startedShard.allocationId().getId() + "] have to have the same"; Updates updates = changes(startedShard.shardId()); - updates.addedAllocationIds.add(startedShard.allocationId().getId()); + // if the started shard is an untracked replica, don't bother sending it as part of the + // in sync id set. 
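+ // Search-only replicas never receive replicated operations, and updateInSyncAllocations
+ // below filters them out as well, keeping the set bounded by number_of_replicas + 1.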
+ if (startedShard.isSearchOnly() == false) { + updates.addedAllocationIds.add(startedShard.allocationId().getId()); + } if (startedShard.primary() // started shard has to have null recoverySource; have to pick up recoverySource from its initializing state && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { @@ -259,9 +263,9 @@ private IndexMetadata.Builder updateInSyncAllocations( // We use number_of_replicas + 1 (= possible active shard copies) to bound the inSyncAllocationIds set // Only trim the set of allocation ids when it grows, otherwise we might trim too eagerly when the number // of replicas was decreased while shards were unassigned. - int maxActiveShards = oldIndexMetadata.getNumberOfReplicas() + oldIndexMetadata.getNumberOfSearchOnlyReplicas() + 1; // +1 for - // the - // primary + int maxActiveShards = oldIndexMetadata.getNumberOfReplicas() + 1; // +1 for the primary IndexShardRoutingTable newShardRoutingTable = newRoutingTable.shardRoutingTable(shardId); assert newShardRoutingTable.assignedShards() .stream() @@ -273,6 +277,7 @@ private IndexMetadata.Builder updateInSyncAllocations( List assignedShards = newShardRoutingTable.assignedShards() .stream() .filter(s -> s.isRelocationTarget() == false) + .filter(s -> s.isSearchOnly() == false) // do not consider search only shards for in sync validation .collect(Collectors.toList()); assert assignedShards.size() <= maxActiveShards : "cannot have more assigned shards " + assignedShards diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index a05938c176678..7999faece52ca 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -247,11 +247,17 @@ void balance() { final Map nodePrimaryShardCount = calculateNodePrimaryShardCount(remoteRoutingNodes); int totalPrimaryShardCount = nodePrimaryShardCount.values().stream().reduce(0, Integer::sum); - totalPrimaryShardCount += routingNodes.unassigned().getNumPrimaries(); - int avgPrimaryPerNode = (totalPrimaryShardCount + routingNodes.size() - 1) / routingNodes.size(); + int unassignedRemotePrimaryShardCount = 0; + for (ShardRouting shard : routingNodes.unassigned()) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) && shard.primary()) { + unassignedRemotePrimaryShardCount++; + } + } + totalPrimaryShardCount += unassignedRemotePrimaryShardCount; + final int avgPrimaryPerNode = (totalPrimaryShardCount + remoteRoutingNodes.size() - 1) / remoteRoutingNodes.size(); - ArrayDeque sourceNodes = new ArrayDeque<>(); - ArrayDeque targetNodes = new ArrayDeque<>(); + final ArrayDeque sourceNodes = new ArrayDeque<>(); + final ArrayDeque targetNodes = new ArrayDeque<>(); for (RoutingNode node : remoteRoutingNodes) { if (nodePrimaryShardCount.get(node.nodeId()) > avgPrimaryPerNode) { sourceNodes.add(node); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 4bde1e282fe78..32639bc3065da 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++
b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -191,7 +191,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } } else { // Peer recovery - assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER; + assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER + || shardRouting.isSearchOnly(); if (shardRouting.unassignedReasonIndexCreated()) { return allocateInitialShardCopies(shardRouting, node, allocation); @@ -204,7 +205,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { int currentInRecoveries = allocation.routingNodes().getInitialIncomingRecoveries(node.nodeId()); assert shardRouting.unassignedReasonIndexCreated() && !shardRouting.primary(); - return allocateShardCopies( shardRouting, allocation, @@ -212,7 +212,8 @@ private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNo replicasInitialRecoveries, this::getInitialPrimaryNodeOutgoingRecoveries, replicasInitialRecoveries, - true + true, + node ); } @@ -228,7 +229,8 @@ private Decision allocateNonInitialShardCopies(ShardRouting shardRouting, Routin concurrentIncomingRecoveries, this::getPrimaryNodeOutgoingRecoveries, concurrentOutgoingRecoveries, - false + false, + node ); } @@ -249,7 +251,8 @@ private Decision allocateShardCopies( int inRecoveriesLimit, BiFunction primaryNodeOutRecoveriesFunc, int outRecoveriesLimit, - boolean isInitialShardCopies + boolean isInitialShardCopies, + RoutingNode candidateNode ) { // Allocating a shard to this node will increase the incoming recoveries if (currentInRecoveries >= inRecoveriesLimit) { @@ -274,6 +277,16 @@ private Decision allocateShardCopies( ); } } else { + // if this is a search shard that recovers from remote store, ignore outgoing recovery limits. 
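+ // Such replicas sync segments from the remote store rather than from the primary, so they
+ // consume no outgoing recovery slot on the primary's node; only the incoming limit applies.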
+ if (shardRouting.isSearchOnly() && candidateNode.node().isRemoteStoreNode()) { + return allocation.decision( + YES, + NAME, + "Remote-based search replica below incoming recovery limit: [%d < %d]", + currentInRecoveries, + inRecoveriesLimit + ); + } // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); if (primaryShard == null) { @@ -319,6 +332,10 @@ private Decision allocateShardCopies( } } + private static boolean isRemoteStoreNode(ShardRouting shardRouting, RoutingAllocation allocation) { + return allocation.nodes().getNodes().get(shardRouting.currentNodeId()).isRemoteStoreNode(); + } + /** * The shard routing passed to {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} is not the initializing shard to this * node but: @@ -357,9 +374,18 @@ private ShardRouting initializingShard(ShardRouting shardRouting, String current @Override public Decision canMoveAway(ShardRouting shardRouting, RoutingAllocation allocation) { int outgoingRecoveries = 0; - if (!shardRouting.primary() && !shardRouting.isSearchOnly()) { + if (!shardRouting.primary()) { ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); - outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId()); + if (primaryShard != null) { + outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId()); + } else { + assert shardRouting.isSearchOnly(); + // check if the search replica being moved away recovers from remote store; if not, + // return a NO decision, since the primary it would otherwise recover from is not active. + if (isRemoteStoreNode(shardRouting, allocation) == false) { + return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active"); + } + } } else { outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(shardRouting.currentNodeId()); } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index d0b6f812e9ee2..6489f3cb33ce0 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -233,6 +233,13 @@ public ClusterState state() { return clusterState; } + /** + * Returns true if the appliedClusterState is not null + */ + public boolean isStateInitialised() { + return this.state.get() != null; + } + /** * Returns true if the appliedClusterState is not null */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 1a79161d223e2..b4f2250f6dec9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -183,6 +183,13 @@ public ClusterState state() { return clusterApplierService.state(); } + /** + * Returns true if the state in appliedClusterState is not null + */ + public boolean isStateInitialised() { + return clusterApplierService.isStateInitialised(); + } + /** * The state that is persisted to store but may not be applied to cluster.
* @return ClusterState diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index 2bde920d7d6af..3683882540d52 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -80,6 +80,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING; + /** * A module to handle registering and binding all network related classes. * @@ -157,6 +160,8 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); + private final Map> transportAuxFactories = new HashMap<>(); + private final List transportInterceptors = new ArrayList<>(); /** @@ -222,6 +227,18 @@ public NetworkModule( registerHttpTransport(entry.getKey(), entry.getValue()); } + Map> auxTransportFactory = plugin.getAuxTransports( + settings, + threadPool, + circuitBreakerService, + networkService, + clusterSettings, + tracer + ); + for (Map.Entry> entry : auxTransportFactory.entrySet()) { + registerAuxTransport(entry.getKey(), entry.getValue()); + } + Map> transportFactory = plugin.getTransports( settings, threadPool, @@ -305,6 +322,12 @@ private void registerHttpTransport(String key, Supplier fac } } + private void registerAuxTransport(String key, Supplier factory) { + if (transportAuxFactories.putIfAbsent(key, factory) != null) { + throw new IllegalArgumentException("transport for name: " + key + " is already registered"); + } + } + /** * Register an allocation command. *
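For illustration, a minimal sketch (not part of this change) of how a plugin might register an auxiliary transport that getAuxServerTransportList() below would pick up. The MyNetworkPlugin and MyAuxTransport classes and the "my-aux" key are hypothetical, and the import locations are assumptions:

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;

    import org.opensearch.common.network.NetworkService;
    import org.opensearch.common.settings.ClusterSettings;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.core.indices.breaker.CircuitBreakerService;
    import org.opensearch.plugins.NetworkPlugin;
    import org.opensearch.plugins.Plugin;
    import org.opensearch.telemetry.tracing.Tracer;
    import org.opensearch.threadpool.ThreadPool;

    public class MyNetworkPlugin extends Plugin implements NetworkPlugin {

        // Hypothetical lifecycle component; a real implementation would bind a server
        // socket from its configured aux port range in doStart().
        static final class MyAuxTransport extends NetworkPlugin.AuxTransport {
            @Override
            protected void doStart() { /* bind and start accepting connections */ }

            @Override
            protected void doStop() { /* stop accepting connections */ }

            @Override
            protected void doClose() { /* release resources */ }
        }

        @Override
        public Map<String, Supplier<NetworkPlugin.AuxTransport>> getAuxTransports(
            Settings settings,
            ThreadPool threadPool,
            CircuitBreakerService circuitBreakerService,
            NetworkService networkService,
            ClusterSettings clusterSettings,
            Tracer tracer
        ) {
            // The key is what NetworkModule matches against the aux.transport.types setting.
            return Collections.singletonMap("my-aux", MyAuxTransport::new);
        }
    }

Starting such a transport then amounts to listing its key in the node settings (aux.transport.types: my-aux), optionally with an aux.transport.my-aux.ports range, which defaults to 9400-9500 per AUX_PORT_DEFAULTS in NetworkPlugin.AuxTransport further below.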
@@ -346,6 +369,25 @@ public Supplier getHttpServerTransportSupplier() { return factory; } + /** + * Optional client/server transports that run in parallel to HttpServerTransport. + * Multiple transport types can be registered and enabled via AUX_TRANSPORT_TYPES_SETTING. + * An IllegalStateException is thrown if a transport type is enabled but not registered. + */ + public List getAuxServerTransportList() { + List serverTransportSuppliers = new ArrayList<>(); + + for (String transportType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) { + final Supplier factory = transportAuxFactories.get(transportType); + if (factory == null) { + throw new IllegalStateException("Unsupported " + AUX_TRANSPORT_TYPES_KEY + " [" + transportType + "]"); + } + serverTransportSuppliers.add(factory.get()); + } + + return serverTransportSuppliers; + } + public Supplier getTransportSupplier() { final String name; if (TRANSPORT_TYPE_SETTING.exists(settings)) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index aac53a645342c..92dfd96531877 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -150,6 +150,7 @@ import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; +import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.PluginsService; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; @@ -362,6 +363,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED, NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION, NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME, + NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING, HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, HttpTransportSettings.SETTING_CORS_ENABLED, HttpTransportSettings.SETTING_CORS_MAX_AGE, @@ -786,7 +788,6 @@ public void apply(Settings value, Settings current, Settings previous) { // Snapshot related Settings BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING, - BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD, // Composite index settings diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 7ab395a1117e7..e5a019b58f7da 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -272,6 +272,30 @@ public static Instant clampToNanosRange(Instant instant) { return instant; } + static final Instant INSTANT_LONG_MIN_VALUE = Instant.ofEpochMilli(Long.MIN_VALUE); + static final Instant INSTANT_LONG_MAX_VALUE = Instant.ofEpochMilli(Long.MAX_VALUE); + + /** + * Clamps the given {@link Instant} to the valid epoch millisecond range. + * + * - If the input is before {@code Instant.ofEpochMilli(Long.MIN_VALUE)}, it returns {@code Instant.ofEpochMilli(Long.MIN_VALUE)}. + * - If the input is after {@code Instant.ofEpochMilli(Long.MAX_VALUE)}, it returns {@code Instant.ofEpochMilli(Long.MAX_VALUE)}. + * - Otherwise, it returns the input as-is.
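+ *
+ * For example (illustrative): {@code clampToMillisRange(Instant.MAX)} returns
+ * {@code Instant.ofEpochMilli(Long.MAX_VALUE)}, so a later {@code toEpochMilli()} call cannot overflow.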
+ * + * @param instant the {@link Instant} to clamp + * @return the clamped {@link Instant} + * @throws NullPointerException if the input is {@code null} + */ + public static Instant clampToMillisRange(Instant instant) { + if (instant.isBefore(INSTANT_LONG_MIN_VALUE)) { + return INSTANT_LONG_MIN_VALUE; + } + if (instant.isAfter(INSTANT_LONG_MAX_VALUE)) { + return INSTANT_LONG_MAX_VALUE; + } + return instant; + } + /** * convert a long value to a java time instant * the long value resembles the nanoseconds since the epoch diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 7c65516132b50..fb6a14ec7d081 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -1473,8 +1473,22 @@ public ClusterState getClusterStateForManifest( try { ClusterState stateFromCache = remoteClusterStateCache.getState(clusterName, manifest); if (stateFromCache != null) { + logger.trace( + () -> new ParameterizedMessage( + "Found cluster state in cache for term {} and version {}", + manifest.getClusterTerm(), + manifest.getStateVersion() + ) + ); return stateFromCache; } + logger.info( + () -> new ParameterizedMessage( + "Cluster state not found in cache for term {} and version {}", + manifest.getClusterTerm(), + manifest.getStateVersion() + ) + ); final ClusterState clusterState; final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 991fbf12072be..7f78ae0b9d2ff 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -62,6 +62,7 @@ import org.opensearch.telemetry.tracing.channels.TraceableRestChannel; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BindTransportException; +import org.opensearch.transport.Transport; import java.io.IOException; import java.net.InetAddress; @@ -71,7 +72,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -192,7 +192,25 @@ protected void bindServer() { throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); + final int publishPort = Transport.resolveTransportPublishPort( + SETTING_HTTP_PUBLISH_PORT.get(settings), + boundAddresses, + publishInetAddress + ); + if (publishPort < 0) { + throw new BindHttpException( + "Failed to auto-resolve http publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). 
" + + "Please specify a unique port by setting " + + SETTING_HTTP_PORT.getKey() + + " or " + + SETTING_HTTP_PUBLISH_PORT.getKey() + ); + } + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); logger.info("{}", boundAddress); @@ -258,47 +276,6 @@ protected void doClose() {} */ protected abstract void stopInternal(); - // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); - - if (publishPort < 0) { - for (TransportAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.address().getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (TransportAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { - throw new BindHttpException( - "Failed to auto-resolve http publish port, multiple bound addresses " - + boundAddresses - + " with distinct ports and none of them matched the publish address (" - + publishInetAddress - + "). " - + "Please specify a unique port by setting " - + SETTING_HTTP_PORT.getKey() - + " or " - + SETTING_HTTP_PUBLISH_PORT.getKey() - ); - } - return publishPort; - } - public void onException(HttpChannel channel, Exception e) { channel.handleException(e); if (lifecycle.started() == false) { diff --git a/server/src/main/java/org/opensearch/http/HttpTracer.java b/server/src/main/java/org/opensearch/http/HttpTracer.java index de1da4a20e294..e31cca21f6a54 100644 --- a/server/src/main/java/org/opensearch/http/HttpTracer.java +++ b/server/src/main/java/org/opensearch/http/HttpTracer.java @@ -116,10 +116,11 @@ void traceResponse( ) { logger.trace( new ParameterizedMessage( - "[{}][{}][{}][{}][{}] sent response to [{}] success [{}]", + "[{}][{}][{}][{}][{}][{}] sent response to [{}] success [{}]", requestId, opaqueHeader, restResponse.status(), + restResponse.status().getStatus(), restResponse.contentType(), contentLength, httpChannel, diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java index e538be5d5bece..e46cf6f56b36e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java @@ -152,7 +152,7 @@ private static MetricStat validateStarTreeMetricSupport( MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat(); field = ((MetricAggregatorFactory) aggregatorFactory).getField(); - if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { + if (field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { return metricStat; } } diff --git 
a/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java b/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java new file mode 100644 index 0000000000000..eb8d8f1667218 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.fielddata; + +import org.apache.lucene.index.SortedNumericDocValues; + +import java.io.IOException; + +/** + * Wraps long-based {@link SortedNumericDocValues} as unsigned long ones + * (primarily used by {@link org.opensearch.search.MultiValueMode}) + * + * @opensearch.internal + */ +public final class LongToSortedNumericUnsignedLongValues extends SortedNumericUnsignedLongValues { + private final SortedNumericDocValues values; + + public LongToSortedNumericUnsignedLongValues(SortedNumericDocValues values) { + this.values = values; + } + + @Override + public boolean advanceExact(int target) throws IOException { + return values.advanceExact(target); + } + + @Override + public long nextValue() throws IOException { + return values.nextValue(); + } + + @Override + public int docValueCount() { + return values.docValueCount(); + } + + public int advance(int target) throws IOException { + return values.advance(target); + } + + public int docID() { + return values.docID(); + } + + /** Return the wrapped values. */ + public SortedNumericDocValues getNumericUnsignedLongValues() { + return values; + } +} diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java new file mode 100644 index 0000000000000..fa4c5152b9f90 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.fielddata; + +import org.apache.lucene.index.SortedNumericDocValues; +import org.opensearch.common.annotation.PublicApi; + +import java.io.IOException; + +/** + * Clone of {@link SortedNumericDocValues} for unsigned long values. + * + * @opensearch.api + */ +@PublicApi(since = "2.19.0") +public abstract class SortedNumericUnsignedLongValues { + + /** Sole constructor. (For invocation by subclass + * constructors, typically implicit.) */ + protected SortedNumericUnsignedLongValues() {} + + /** Advance the iterator to exactly {@code target} and return whether + * {@code target} has a value. + * {@code target} must be greater than or equal to the current + * doc ID and must be a valid doc ID, ie. ≥ 0 and + * < {@code maxDoc}.*/ + public abstract boolean advanceExact(int target) throws IOException; + + /** + * Iterates to the next value in the current document. Do not call this more than + * {@link #docValueCount} times for the document. + */ + public abstract long nextValue() throws IOException; + + /** + * Retrieves the number of values for the current document. This must always + * be greater than zero.
+ * It is illegal to call this method after {@link #advanceExact(int)} + * returned {@code false}. + */ + public abstract int docValueCount(); + + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * target, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if target is greater than the highest document number in the set. + * + * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks + * in and is implemented by most numeric types. + */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } + + public abstract int docID(); +} diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index 9db5817450cd0..6fc85bd0b2689 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -10,7 +10,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; @@ -24,6 +23,8 @@ import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; import org.opensearch.index.fielddata.LeafNumericFieldData; +import org.opensearch.index.fielddata.LongToSortedNumericUnsignedLongValues; +import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues; import org.opensearch.index.search.comparators.UnsignedLongComparator; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; @@ -57,14 +58,13 @@ public SortField.Type reducedType() { return SortField.Type.LONG; } - private SortedNumericDocValues loadDocValues(LeafReaderContext context) { + private SortedNumericUnsignedLongValues loadDocValues(LeafReaderContext context) { final LeafNumericFieldData data = indexFieldData.load(context); - SortedNumericDocValues values = data.getLongValues(); - return values; + return new LongToSortedNumericUnsignedLongValues(data.getLongValues()); } private NumericDocValues getNumericDocValues(LeafReaderContext context, BigInteger missingValue) throws IOException { - final SortedNumericDocValues values = loadDocValues(context); + final SortedNumericUnsignedLongValues values = loadDocValues(context); if (nested == null) { return FieldData.replaceMissing(sortMode.select(values), missingValue); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index aece93b90aa18..11b38c429dd8e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -123,7 +123,7 @@ public enum Resolution { MILLISECONDS(CONTENT_TYPE, NumericType.DATE) { @Override public long convert(Instant instant) { - return instant.toEpochMilli(); + return clampToValidRange(instant).toEpochMilli(); } @Override @@ 
-133,7 +133,7 @@ public Instant toInstant(long value) { @Override public Instant clampToValidRange(Instant instant) { - return instant; + return DateUtils.clampToMillisRange(instant); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 0ccdb40f9d33a..7339ed29db345 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; @@ -336,23 +335,17 @@ private KeywordFieldType valueFieldType() { return (mappedFieldTypeName == null) ? valueFieldType : valueAndPathFieldType; } + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return valueFieldType().termQueryCaseInsensitive(rewriteValue(inputToString(value)), context); + } + /** * redirect queries with rewrite value to rewriteSearchValue and directSubFieldName */ @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - - String searchValueString = inputToString(value); - String directSubFieldName = directSubfield(); - String rewriteSearchValue = rewriteValue(searchValueString); - - failIfNotIndexed(); - Query query; - query = new TermQuery(new Term(directSubFieldName, indexedValueForSearch(rewriteSearchValue))); - if (boost() != 1f) { - query = new BoostQuery(query, boost()); - } - return query; + return valueFieldType().termQuery(rewriteValue(inputToString(value)), context); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 54a1aead5fcc7..43f21aebaa47c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -39,6 +39,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; @@ -391,6 +392,46 @@ protected Object rewriteForDocValue(Object value) { return value; } + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + if (isSearchable()) { + return super.termQueryCaseInsensitive(value, context); + } else { + BytesRef bytesRef = indexedValueForSearch(rewriteForDocValue(value)); + Term term = new Term(name(), bytesRef); + Query query = AutomatonQueries.createAutomatonQuery( + term, + AutomatonQueries.toCaseInsensitiveString(bytesRef.utf8ToString(), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT), + MultiTermQuery.DOC_VALUES_REWRITE + ); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + if (isSearchable()) { + return super.termQuery(value, context); + } else { + 
Query query = SortedSetDocValuesField.newSlowRangeQuery( + name(), + indexedValueForSearch(rewriteForDocValue(value)), + indexedValueForSearch(rewriteForDocValue(value)), + true, + true + ); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + } + @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexedAndNoDocValues(); diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java index fb97f8c309a70..757de65248d33 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -290,6 +291,16 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context); } + @Override + public Query termQuery(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQuery(value, context)); + } + + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQueryCaseInsensitive(value, context)); + } + private List> getTermsFromTokenStream(TokenStream stream) throws IOException { final List> termArray = new ArrayList<>(); TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 9bffaedcbf482..80275f27bbcb9 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -633,6 +633,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() { */ final boolean renewalNeeded = StreamSupport.stream(routingTable.spliterator(), false) .filter(ShardRouting::assignedToNode) + .filter(r -> r.isSearchOnly() == false) .anyMatch(shardRouting -> { final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)); if (retentionLease == null) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index baa1351f15cda..670fe8563d51c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1631,6 +1631,22 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { return luceneVersion == null ? 
indexSettings.getIndexVersionCreated().luceneVersion : luceneVersion; } + /** + * Fetches the last remote uploaded segment metadata file + * @return {@link RemoteSegmentMetadata} + * @throws IOException if reading the latest remote metadata file fails + */ + public RemoteSegmentMetadata fetchLastRemoteUploadedSegmentMetadata() throws IOException { + if (!indexSettings.isAssignedOnRemoteNode()) { + throw new IllegalStateException("Index is not assigned on Remote Node"); + } + RemoteSegmentMetadata lastUploadedMetadata = getRemoteDirectory().readLatestMetadataFile(); + if (lastUploadedMetadata == null) { + throw new FileNotFoundException("No metadata file found in remote store"); + } + return lastUploadedMetadata; + } + /** * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this * commit won't be freed until the commit / snapshot is closed. @@ -2531,22 +2547,24 @@ public void openEngineAndRecoverFromTranslog(boolean syncFromRemote) throws IOEx */ public void openEngineAndSkipTranslogRecovery() throws IOException { assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]"; - recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); - loadGlobalCheckpointToReplicationTracker(); - innerOpenEngineAndTranslog(replicationTracker); - getEngine().skipTranslogRecovery(); + openEngineAndSkipTranslogRecovery(true); } public void openEngineAndSkipTranslogRecoveryFromSnapshot() throws IOException { - assert routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery [" - + routingEntry() - + "]"; + assert routingEntry().isSearchOnly() || routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery [" + routingEntry() + "]"; recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); maybeCheckIndex(); recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + openEngineAndSkipTranslogRecovery(routingEntry().isSearchOnly()); + } + + void openEngineAndSkipTranslogRecovery(boolean syncFromRemote) throws IOException { recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); loadGlobalCheckpointToReplicationTracker(); - innerOpenEngineAndTranslog(replicationTracker, false); + innerOpenEngineAndTranslog(replicationTracker, syncFromRemote); + assert routingEntry().isSearchOnly() == false || translogStats().estimatedNumberOfOperations() == 0 : "Translog is expected to be empty but holds " + translogStats().estimatedNumberOfOperations() + " operations."; getEngine().skipTranslogRecovery(); } @@ -2896,7 +2914,8 @@ public void recoverFromLocalShards( public void recoverFromStore(ActionListener listener) { // we are the first primary, recover from the gateway // if its post api allocation, the index should exists - assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; + assert shardRouting.primary() || shardRouting.isSearchOnly() : "recover from store only makes sense if the shard is a primary shard or an untracked search only replica"; assert shardRouting.initializing() : "can only start recovery on initializing shard"; StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); storeRecovery.recoverFromStore(this, listener); diff --git a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java index ccfaf50da1c6b..b2db48737ee3f 100644 ---
a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java +++ b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java @@ -67,15 +67,17 @@ public ReplicationGroup( this.inSyncAllocationIds = inSyncAllocationIds; this.trackedAllocationIds = trackedAllocationIds; this.version = version; - this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getAllAllocationIds()); this.replicationTargets = new ArrayList<>(); this.skippedShards = new ArrayList<>(); for (final ShardRouting shard : routingTable) { - // search only replicas never receive any replicated operations if (shard.unassigned() || shard.isSearchOnly()) { assert shard.primary() == false : "primary shard should not be unassigned in a replication group: " + shard; skippedShards.add(shard); + if (shard.isSearchOnly()) { + assert shard.allocationId() == null || inSyncAllocationIds.contains(shard.allocationId().getId()) == false + : " Search replicas should not be part of the inSync id set"; + } } else { if (trackedAllocationIds.contains(shard.allocationId().getId())) { replicationTargets.add(shard); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 494fe0dbef803..e9da9d2159b17 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -545,7 +545,7 @@ private boolean canRecover(IndexShard indexShard) { // got closed on us, just ignore this recovery return false; } - if (indexShard.routingEntry().primary() == false) { + if (indexShard.routingEntry().primary() == false && indexShard.routingEntry().isSearchOnly() == false) { throw new IndexShardRecoveryException(shardId, "Trying to recover when the shard is in backup state", null); } return true; @@ -748,7 +748,17 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe writeEmptyRetentionLeasesFile(indexShard); indexShard.recoveryState().getIndex().setFileDetailsComplete(); } - indexShard.openEngineAndRecoverFromTranslog(); + if (indexShard.routingEntry().isSearchOnly() == false) { + indexShard.openEngineAndRecoverFromTranslog(); + } else { + // Opens the engine for pull based replica copies that are + // not primary eligible. This will skip any checkpoint tracking and ensure + // that the shards are sync'd with remote store before opening. + // + // first bootstrap new history / translog so that the TranslogUUID matches the UUID from the latest commit. 
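+ // (bootstrapForSnapshot is expected to create a fresh empty translog and associate its
+ // UUID with the latest commit, so the engine can open without replaying operations.)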
+ bootstrapForSnapshot(indexShard, store); + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + } if (indexShard.shouldSeedRemoteStore()) { indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { logger.info("Attempting to seed Remote Store via local recovery for {}", indexShard.shardId()); @@ -879,6 +889,7 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), localCheckpoint, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index ee28d416cf5ee..b84be7742394c 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1212,6 +1212,9 @@ protected Node( SearchExecutionStatsCollector.makeWrapper(responseCollectorService) ); final HttpServerTransport httpServerTransport = newHttpTransport(networkModule); + + pluginComponents.addAll(newAuxTransports(networkModule)); + final IndexingPressureService indexingPressureService = new IndexingPressureService(settings, clusterService); // Going forward, IndexingPressureService will have required constructs for exposing listeners/interfaces for plugin // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478). @@ -2107,6 +2110,10 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) { return networkModule.getHttpServerTransportSupplier().get(); } + protected List newAuxTransports(NetworkModule networkModule) { + return networkModule.getAuxServerTransportList(); + } + private static class LocalNodeFactory implements Function { private final SetOnce localNode = new SetOnce<>(); private final String persistentNodeId; diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index c1c041ce01198..fb97cf40d90d6 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -21,6 +21,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -183,6 +184,20 @@ public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode boolean repositoryAlreadyPresent = false; for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { + try { + // This handles the case where, during a previous node-join attempt, the publish operation succeeded + // but the commit operation failed; the cluster state may then contain repository metadata that was never + // applied to the repository service. This may lead to assertion failures down the line.
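+ // repository() throws RepositoryMissingException when the repository was never created
+ // locally; in that case the metadata checks are skipped for this join attempt.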
+ repositoriesService.get().repository(newRepositoryMetadata.name()); + } catch (RepositoryMissingException e) { + logger.warn( + "Skipping repositories metadata checks: Remote repository [{}] is in the cluster state but not present " + + "in the repository service.", + newRepositoryMetadata.name() + ); + break; + } + try { // This will help in handling two scenarios - // 1. When a fresh cluster is formed and a node tries to join the cluster, the repository diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 138ef6f71280d..516aa94534f94 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -31,9 +31,13 @@ package org.opensearch.plugins; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; @@ -49,8 +53,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.function.Supplier; +import static java.util.Collections.emptyList; +import static org.opensearch.common.settings.Setting.affixKeySetting; + /** * Plugin for extending network and transport related classes * @@ -58,6 +66,49 @@ */ public interface NetworkPlugin { + /** + * Auxiliary transports are lifecycle components with an associated port range. + * These pluggable client/server transport implementations have their lifecycle managed by Node. + * + * Auxiliary transports are additionally defined by a port range on which they bind. Opening permissions on these + * ports is awkward as {@link org.opensearch.bootstrap.Security} is configured prior to Node initialization during + * bootstrap. To allow pluggable AuxTransports access to configurable port ranges we require the port range to be provided + * through an {@link org.opensearch.common.settings.Setting.AffixSetting} of the form 'AUX_SETTINGS_PREFIX.{aux-transport-key}.ports'. + */ + abstract class AuxTransport extends AbstractLifecycleComponent { + public static final String AUX_SETTINGS_PREFIX = "aux.transport."; + public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types"; + public static final String AUX_PORT_DEFAULTS = "9400-9500"; + public static final Setting.AffixSetting AUX_TRANSPORT_PORTS = affixKeySetting( + AUX_SETTINGS_PREFIX, + "ports", + key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope) + ); + + public static final Setting> AUX_TRANSPORT_TYPES_SETTING = Setting.listSetting( + AUX_TRANSPORT_TYPES_KEY, + emptyList(), + Function.identity(), + Setting.Property.NodeScope + ); + } + + /** + * Auxiliary transports are optional and run in parallel to the default HttpServerTransport. + * Returns a map of AuxTransport suppliers.
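+ * Keys name transport types and are matched against the aux.transport.types node setting
+ * when NetworkModule builds the server transport list.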
+ */ + @ExperimentalApi + default Map> getAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.emptyMap(); + } + /** * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing * transport (inter-node) requests. This must not return null diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index 7cf24d7902095..b57fd5eb93a19 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -86,6 +86,8 @@ public class PluginInfo implements Writeable, ToXContentObject { private final String classname; private final String customFolderName; private final List extendedPlugins; + // Optional extended plugins are a subset of extendedPlugins that only contains the optional extended plugins + private final List optionalExtendedPlugins; private final boolean hasNativeController; /** @@ -149,7 +151,11 @@ public PluginInfo( this.javaVersion = javaVersion; this.classname = classname; this.customFolderName = customFolderName; - this.extendedPlugins = Collections.unmodifiableList(extendedPlugins); + this.extendedPlugins = extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList()); + this.optionalExtendedPlugins = extendedPlugins.stream() + .filter(PluginInfo::isOptionalExtension) + .map(s -> s.split(";")[0]) + .collect(Collectors.toUnmodifiableList()); this.hasNativeController = hasNativeController; } @@ -207,12 +213,22 @@ public PluginInfo(final StreamInput in) throws IOException { this.javaVersion = in.readString(); this.classname = in.readString(); if (in.getVersion().onOrAfter(Version.V_1_1_0)) { - customFolderName = in.readString(); + this.customFolderName = in.readString(); } else { - customFolderName = this.name; + this.customFolderName = this.name; } - extendedPlugins = in.readStringList(); - hasNativeController = in.readBoolean(); + this.extendedPlugins = in.readStringList(); + this.hasNativeController = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { + this.optionalExtendedPlugins = in.readStringList(); + } else { + this.optionalExtendedPlugins = new ArrayList<>(); + } + } + + static boolean isOptionalExtension(String extendedPlugin) { + String[] dependency = extendedPlugin.split(";"); + return dependency.length > 1 && "optional=true".equals(dependency[1]); } @Override @@ -240,6 +256,9 @@ This works for currently supported range notations (=,~) } out.writeStringCollection(extendedPlugins); out.writeBoolean(hasNativeController); + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { + out.writeStringCollection(optionalExtendedPlugins); + } } /** @@ -422,8 +441,17 @@ public String getFolderName() { * * @return the names of the plugins extended */ + public boolean isExtendedPluginOptional(String extendedPlugin) { + return optionalExtendedPlugins.contains(extendedPlugin); + } + + /** + * Other plugins this plugin extends through SPI + * + * @return the names of the plugins extended + */ public List getExtendedPlugins() { - return extendedPlugins; + return extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList()); } /** @@ -498,6 +526,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws 
builder.field("custom_foldername", customFolderName); builder.field("extended_plugins", extendedPlugins); builder.field("has_native_controller", hasNativeController); + builder.field("optional_extended_plugins", optionalExtendedPlugins); } builder.endObject(); diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index f08c9c738f1b4..9bc1f1334122e 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -524,7 +524,13 @@ private static void addSortedBundle( for (String dependency : bundle.plugin.getExtendedPlugins()) { Bundle depBundle = bundles.get(dependency); if (depBundle == null) { - throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + if (bundle.plugin.isExtendedPluginOptional(dependency)) { + logger.warn("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + logger.warn("Some features of this plugin may not function without the dependencies being installed.\n"); + continue; + } else { + throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + } } addSortedBundle(depBundle, bundles, sortedBundles, dependencyStack); assert sortedBundles.contains(depBundle); @@ -653,6 +659,9 @@ static void checkBundleJarHell(Set classpath, Bundle bundle, Map urls = new HashSet<>(); for (String extendedPlugin : exts) { Set pluginUrls = transitiveUrls.get(extendedPlugin); + if (pluginUrls == null && bundle.plugin.isExtendedPluginOptional(extendedPlugin)) { + continue; + } assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin; Set intersection = new HashSet<>(urls); @@ -704,6 +713,10 @@ private Plugin loadBundle(Bundle bundle, Map loaded) { List extendedLoaders = new ArrayList<>(); for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) { Plugin extendedPlugin = loaded.get(extendedPluginName); + if (extendedPlugin == null && bundle.plugin.isExtendedPluginOptional(extendedPluginName)) { + // extended plugin is optional and is not installed + continue; + } assert extendedPlugin != null; if (ExtensiblePlugin.class.isInstance(extendedPlugin) == false) { throw new IllegalStateException("Plugin [" + name + "] cannot extend non-extensible plugin [" + extendedPluginName + "]"); diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 4583b26ccfd92..a964e663adefa 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -80,6 +80,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -918,6 +919,12 @@ public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMe Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings(); Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings(); + assert Objects.nonNull(repository) : String.format( + Locale.ROOT, + "repository [%s] not present in RepositoryService", + currentRepositoryMetadata.name() + ); + List restrictedSettings = repository.getRestrictedSystemRepositorySettings() .stream() .map(setting -> setting.getKey()) 
diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 3c24d1965744a..64a64869df7eb 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -429,6 +429,45 @@ default void snapshotRemoteStoreIndexShard( throw new UnsupportedOperationException(); } + /** + * Adds a reference of remote store data for an index commit point. + * <p>
+ * The index commit point can be obtained by using the {@link org.opensearch.index.engine.Engine#acquireLastIndexCommit} method. + * For a closed index, it can be obtained by reading the last remote uploaded metadata using {@link org.opensearch.index.shard.IndexShard#fetchLastRemoteUploadedSegmentMetadata()}. + * Repository implementations shouldn't release the snapshot index commit point; that is done by the method caller. + * <p>
+ * As the snapshot process progresses, the implementation of this method should update the {@link IndexShardSnapshotStatus} object and check + * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. + * @param store store to be snapshotted + * @param snapshotId snapshot id + * @param indexId id for the index being snapshotted + * @param snapshotIndexCommit commit point + * @param shardStateIdentifier a unique identifier of the state of the shard that is stored with the shard's snapshot and used + * to detect if the shard has changed between snapshots. If {@code null} is passed as the identifier + * snapshotting will be done by inspecting the physical files referenced by {@code snapshotIndexCommit} + * @param snapshotStatus snapshot status + * @param primaryTerm current primary term + * @param commitGeneration current commit generation + * @param startTime start time of the snapshot commit; this will be used as the start time for the snapshot + * @param indexFilesToFileLengthMap map of index files to file length + * @param listener listener invoked on completion + */ + default void snapshotRemoteStoreIndexShard( + Store store, + SnapshotId snapshotId, + IndexId indexId, + @Nullable IndexCommit snapshotIndexCommit, + @Nullable String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + long primaryTerm, + long commitGeneration, + long startTime, + @Nullable Map<String, Long> indexFilesToFileLengthMap, + ActionListener<String> listener + ) { + throw new UnsupportedOperationException(); + } + /** * Restores snapshot of the shard. * <p>
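The new snapshotRemoteStoreIndexShard overload above decouples shallow snapshots from a live IndexCommit so that a closed remote-store index can be snapshotted from its last uploaded remote segment metadata. A minimal caller-side sketch of that flow, mirroring the SnapshotShardsService change later in this diff (the helper class and method names are illustrative, and the ActionListener<String> generic follows the generation listener used by the existing shard-snapshot methods, since the flattened diff text dropped the type parameters):

import java.io.IOException;
import java.util.Map;
import java.util.stream.Collectors;

import org.opensearch.core.action.ActionListener;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
import org.opensearch.index.store.Store;
import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
import org.opensearch.repositories.IndexId;
import org.opensearch.repositories.Repository;
import org.opensearch.snapshots.SnapshotId;

final class ClosedIndexShallowSnapshotHelper {

    // Illustrative helper: snapshot a closed remote-store shard without an IndexCommit.
    static void snapshotClosedShard(
        Repository repository,
        Store store,
        SnapshotId snapshotId,
        IndexId indexId,
        IndexShard indexShard,
        IndexShardSnapshotStatus snapshotStatus,
        long startTime,
        ActionListener<String> listener
    ) throws IOException {
        // A closed index has no live commit point, so read the last metadata
        // uploaded to the remote store instead.
        RemoteSegmentMetadata remoteMetadata = indexShard.fetchLastRemoteUploadedSegmentMetadata();
        Map<String, Long> indexFilesToFileLengthMap = remoteMetadata.getMetadata()
            .entrySet()
            .stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getLength()));

        repository.snapshotRemoteStoreIndexShard(
            store,
            snapshotId,
            indexId,
            null,                       // no IndexCommit for a closed index
            null,                       // shardStateIdentifier
            snapshotStatus,
            remoteMetadata.getPrimaryTerm(),
            remoteMetadata.getGeneration(),
            startTime,
            indexFilesToFileLengthMap,  // drives the file count and total size
            listener
        );
    }
}

When snapshotIndexCommit is null, the BlobStoreRepository implementation that follows derives the file list and total size from indexFilesToFileLengthMap instead of a Store.MetadataSnapshot of the commit.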
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 8fc4401857b68..deee023e2939d 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -50,7 +50,6 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.RepositoryCleanupInProgress; @@ -70,7 +69,6 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -429,16 +427,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo Setting.Property.Final ); - /** - * Controls the fixed prefix for the snapshot shard blob path. cluster.snapshot.async-deletion.enable - */ - public static final Setting SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING = Setting.boolSetting( - "cluster.snapshot.async-deletion.enable", - true, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - protected volatile boolean supportURLRepo; private volatile int maxShardBlobDeleteBatch; @@ -532,8 +520,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo private final String snapshotShardPathPrefix; - private volatile boolean enableAsyncDeletion; - protected final long repositoryDataCacheThreshold; /** @@ -591,8 +577,6 @@ protected BlobStoreRepository( this.recoverySettings = recoverySettings; this.remoteStoreSettings = new RemoteStoreSettings(clusterService.getSettings(), clusterService.getClusterSettings()); this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings()); - this.enableAsyncDeletion = SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings().addSettingsUpdateConsumer(SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, this::setEnableAsyncDeletion); this.repositoryDataCacheThreshold = SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD.get(clusterService.getSettings()).getBytes(); } @@ -2293,15 +2277,7 @@ private void executeOneStaleIndexDelete( private DeleteResult deleteContainer(BlobContainer container) throws IOException { long startTime = System.nanoTime(); - DeleteResult deleteResult; - if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { - // Use deleteAsync and wait for the result - PlainActionFuture future = new PlainActionFuture<>(); - ((AsyncMultiStreamBlobContainer) container).deleteAsync(future); - deleteResult = future.actionGet(); - } else { - deleteResult = container.delete(); - } + DeleteResult deleteResult = container.delete(); logger.debug(new ParameterizedMessage("[{}] Deleted {} in {}ns", metadata.name(), container.path(), startTime - System.nanoTime())); return deleteResult; } @@ -2961,13 +2937,7 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna private void deleteFromContainer(BlobContainer container, List blobs) throws 
IOException { logger.trace(() -> new ParameterizedMessage("[{}] Deleting {} from [{}]", metadata.name(), blobs, container.path())); long startTime = System.nanoTime(); - if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { - PlainActionFuture future = new PlainActionFuture<>(); - ((AsyncMultiStreamBlobContainer) container).deleteBlobsAsyncIgnoringIfNotExists(blobs, future); - future.actionGet(); - } else { - container.deleteBlobsIgnoringIfNotExists(blobs); - } + container.deleteBlobsIgnoringIfNotExists(blobs); logger.debug( () -> new ParameterizedMessage( "[{}] Deletion {} from [{}] took {}ns", @@ -3093,7 +3063,11 @@ public String startVerification() { */ private BlobContainer testContainer(String seed) { BlobPath testBlobPath; - if (prefixModeVerification == true) { + if (prefixModeVerification == true + && (clusterService.isStateInitialised() == false + || clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.V_2_17_0))) { + // During the remote store node bootstrap, the cluster state is not initialised + // Otherwise, the cluster state is initialised and available with the min node version information BasePathInput pathInput = BasePathInput.builder().basePath(basePath()).indexUUID(seed).build(); testBlobPath = PathType.HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); } else { @@ -3839,6 +3813,33 @@ private void writeAtomic(BlobContainer container, final String blobName, final B } } + @Override + public void snapshotRemoteStoreIndexShard( + Store store, + SnapshotId snapshotId, + IndexId indexId, + IndexCommit snapshotIndexCommit, + @Nullable String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + long primaryTerm, + long startTime, + ActionListener listener + ) { + snapshotRemoteStoreIndexShard( + store, + snapshotId, + indexId, + snapshotIndexCommit, + shardStateIdentifier, + snapshotStatus, + primaryTerm, + snapshotIndexCommit.getGeneration(), + startTime, + null, + listener + ); + } + @Override public void snapshotRemoteStoreIndexShard( Store store, @@ -3848,13 +3849,16 @@ public void snapshotRemoteStoreIndexShard( String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, long primaryTerm, + long commitGeneration, long startTime, + Map indexFilesToFileLengthMap, ActionListener listener ) { if (isReadOnly()) { listener.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository")); return; } + final ShardId shardId = store.shardId(); try { final String generation = snapshotStatus.generation(); @@ -3862,13 +3866,21 @@ public void snapshotRemoteStoreIndexShard( final BlobContainer shardContainer = shardContainer(indexId, shardId); long indexTotalFileSize = 0; - // local store is being used here to fetch the files metadata instead of remote store as currently - // remote store is mirroring the local store. - List fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames()); - Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit); - for (String fileName : fileNames) { - indexTotalFileSize += commitSnapshotMetadata.get(fileName).length(); + List fileNames; + + if (snapshotIndexCommit != null) { + // local store is being used here to fetch the files metadata instead of remote store as currently + // remote store is mirroring the local store. 
+ fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames()); + Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit); + for (String fileName : fileNames) { + indexTotalFileSize += commitSnapshotMetadata.get(fileName).length(); + } + } else { + fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet()); + indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum(); } + int indexTotalNumberOfFiles = fileNames.size(); snapshotStatus.moveToStarted( @@ -3879,7 +3891,7 @@ public void snapshotRemoteStoreIndexShard( indexTotalFileSize ); - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(commitGeneration); // now create and write the commit point logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); @@ -3890,7 +3902,7 @@ public void snapshotRemoteStoreIndexShard( snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), primaryTerm, - snapshotIndexCommit.getGeneration(), + commitGeneration, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), indexTotalNumberOfFiles, @@ -4890,8 +4902,4 @@ public String toString() { return name; } } - - public void setEnableAsyncDeletion(boolean enableAsyncDeletion) { - this.enableAsyncDeletion = enableAsyncDeletion; - } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java index 3555576433104..304d1cabefd35 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java @@ -40,6 +40,7 @@ import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.search.builder.SearchSourceBuilder; import java.io.IOException; import java.util.List; @@ -81,6 +82,13 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC clusterSearchShardsRequest.routing(request.param("routing")); clusterSearchShardsRequest.preference(request.param("preference")); clusterSearchShardsRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions())); + if (request.hasContentOrSourceParam()) { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.parseXContent(request.contentOrSourceParamParser()); + if (sourceBuilder.slice() != null) { + clusterSearchShardsRequest.slice(sourceBuilder.slice()); + } + } return channel -> client.admin().cluster().searchShards(clusterSearchShardsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index a99da674836f2..fa2e776eca67a 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -42,6 +42,7 @@ import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.Numbers; import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,9 +51,11 @@ import org.opensearch.index.fielddata.AbstractNumericDocValues; import org.opensearch.index.fielddata.AbstractSortedDocValues; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.LongToSortedNumericUnsignedLongValues; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues; import java.io.IOException; import java.util.Locale; @@ -143,6 +146,44 @@ protected double pick( return totalCount > 0 ? totalValue : missingValue; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + long total = 0; + for (int index = 0; index < count; ++index) { + total += values.nextValue(); + } + return total; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + int totalCount = 0; + long totalValue = 0; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + + final int docCount = values.docValueCount(); + for (int index = 0; index < docCount; ++index) { + totalValue += values.nextValue(); + } + totalCount += docCount; + } + } + return totalCount > 0 ? totalValue : missingValue; + } }, /** @@ -228,6 +269,46 @@ protected double pick( } return totalValue / totalCount; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + long total = 0; + for (int index = 0; index < count; ++index) { + total += values.nextValue(); + } + return count > 1 ? divideUnsignedAndRoundUp(total, count) : total; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + int totalCount = 0; + long totalValue = 0; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + final int docCount = values.docValueCount(); + for (int index = 0; index < docCount; ++index) { + totalValue += values.nextValue(); + } + totalCount += docCount; + } + } + if (totalCount < 1) { + return missingValue; + } + return totalCount > 1 ? 
divideUnsignedAndRoundUp(totalValue, totalCount) : totalValue; + } }, /** @@ -259,6 +340,45 @@ protected double pick(SortedNumericDoubleValues values) throws IOException { return values.nextValue(); } } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + int count = values.docValueCount(); + long firstValue = values.nextValue(); + if (count == 1) { + return firstValue; + } else if (count == 2) { + long total = firstValue + values.nextValue(); + return (total >>> 1) + (total & 1); + } else if (firstValue >= 0) { + for (int i = 1; i < (count - 1) / 2; ++i) { + values.nextValue(); + } + if (count % 2 == 0) { + long total = values.nextValue() + values.nextValue(); + return (total >>> 1) + (total & 1); + } else { + return values.nextValue(); + } + } + + final long[] docValues = new long[count]; + docValues[0] = firstValue; + int firstPositiveIndex = 0; + for (int i = 1; i < count; ++i) { + docValues[i] = values.nextValue(); + if (docValues[i] >= 0 && firstPositiveIndex == 0) { + firstPositiveIndex = i; + } + } + final int mid = ((count - 1) / 2 + firstPositiveIndex) % count; + if (count % 2 == 0) { + long total = docValues[mid] + docValues[(mid + 1) % count]; + return (total >>> 1) + (total & 1); + } else { + return docValues[mid]; + } + } }, /** @@ -382,6 +502,47 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc return hasValue ? ord : -1; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + final long min = values.nextValue(); + if (count == 1 || min > 0) { + return min; + } + for (int i = 1; i < count; ++i) { + long val = values.nextValue(); + if (val >= 0) { + return val; + } + } + return min; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + boolean hasValue = false; + long minValue = Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + final long docMin = pick(values); + minValue = Long.compareUnsigned(docMin, minValue) < 0 ? docMin : minValue; + hasValue = true; + } + } + return hasValue ? minValue : missingValue; + } }, /** @@ -525,6 +686,46 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc } return ord; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + long max = values.nextValue(); + long val; + for (int i = 1; i < count; ++i) { + val = values.nextValue(); + if (max < 0 && val >= 0) { + return max; + } + max = val; + } + return max; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + boolean hasValue = false; + long maxValue = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + final long docMax = pick(values); + maxValue = Long.compareUnsigned(maxValue, docMax) < 0 ? docMax : maxValue; + hasValue = true; + } + } + return hasValue ? 
maxValue : missingValue; + } }; /** @@ -1032,6 +1233,126 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc throw new IllegalArgumentException("Unsupported sort mode: " + this); } + /** + * Return a {@link NumericDocValues} instance that can be used to sort documents + * with this mode and the provided values. When a document has no value, + * missingValue is returned. + * <p>
+ * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX + */ + public NumericDocValues select(final SortedNumericUnsignedLongValues values) { + SortedNumericDocValues sortedNumericDocValues = null; + if (values instanceof LongToSortedNumericUnsignedLongValues) { + sortedNumericDocValues = ((LongToSortedNumericUnsignedLongValues) values).getNumericUnsignedLongValues(); + } + + final NumericDocValues singleton = DocValues.unwrapSingleton(sortedNumericDocValues); + if (singleton != null) { + return singleton; + } else { + return new AbstractNumericDocValues() { + + private long value; + + @Override + public boolean advanceExact(int target) throws IOException { + if (values.advanceExact(target)) { + value = pick(values); + return true; + } + return false; + } + + @Override + public int docID() { + return values.docID(); + } + + @Override + public long longValue() throws IOException { + return value; + } + }; + } + } + + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + throw new IllegalArgumentException("Unsupported sort mode: " + this); + } + + /** + * Return a {@link NumericDocValues} instance that can be used to sort root documents + * with this mode, the provided values and filters for root/inner documents. + * <p>
+ * For every root document, the values of its inner documents will be aggregated. + * <p>
+ * Allowed Modes: MIN, MAX + * <p>
+ * NOTE: Calling the returned instance on docs that are not root docs is illegal. + * The returned instance can only evaluate the current and upcoming docs. + */ + public NumericDocValues select( + final SortedNumericUnsignedLongValues values, + final long missingValue, + final BitSet parentDocs, + final DocIdSetIterator childDocs, + int maxDoc, + int maxChildren + ) throws IOException { + if (parentDocs == null || childDocs == null) { + return FieldData.replaceMissing(DocValues.emptyNumeric(), missingValue); + } + + return new AbstractNumericDocValues() { + + int lastSeenParentDoc = -1; + long lastEmittedValue = missingValue; + + @Override + public boolean advanceExact(int parentDoc) throws IOException { + assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming parent docs"; + if (parentDoc == lastSeenParentDoc) { + return true; + } else if (parentDoc == 0) { + lastEmittedValue = missingValue; + return true; + } + final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + final int firstChildDoc; + if (childDocs.docID() > prevParentDoc) { + firstChildDoc = childDocs.docID(); + } else { + firstChildDoc = childDocs.advance(prevParentDoc + 1); + } + + lastSeenParentDoc = parentDoc; + lastEmittedValue = pick(values, missingValue, childDocs, firstChildDoc, parentDoc, maxChildren); + return true; + } + + @Override + public int docID() { + return lastSeenParentDoc; + } + + @Override + public long longValue() { + return lastEmittedValue; + } + }; + } + + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + throw new IllegalArgumentException("Unsupported sort mode: " + this); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(this); @@ -1040,4 +1361,16 @@ public void writeTo(StreamOutput out) throws IOException { public static MultiValueMode readMultiValueModeFrom(StreamInput in) throws IOException { return in.readEnum(MultiValueMode.class); } + + /** + * Copied from {@link Long#divideUnsigned(long, long)} and {@link Long#remainderUnsigned(long, long)} + */ + private static long divideUnsignedAndRoundUp(long dividend, long divisor) { + assert divisor > 0; + final long q = (dividend >>> 1) / divisor << 1; + final long r = dividend - q * divisor; + final long quotient = q + ((r | ~(r - divisor)) >>> (Long.SIZE - 1)); + final long rem = r - ((~(r - divisor) >> (Long.SIZE - 1)) & divisor); + return quotient + Math.round((double) rem / divisor); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index 743d0023364fa..e4a454ee64609 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.bucket.adjacency; +import org.opensearch.Version; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -71,7 +72,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilder<AdjacencyMatrixAggregationBuilder> private static final ParseField SEPARATOR_FIELD = new ParseField("separator"); private
static final ParseField FILTERS_FIELD = new ParseField("filters"); + private static final ParseField SHOW_ONLY_INTERSECTING = new ParseField("show_only_intersecting"); + private List filters; + private boolean showOnlyIntersecting = false; private String separator = DEFAULT_SEPARATOR; private static final ObjectParser PARSER = ObjectParser.fromBuilder( @@ -81,6 +85,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde static { PARSER.declareString(AdjacencyMatrixAggregationBuilder::separator, SEPARATOR_FIELD); PARSER.declareNamedObjects(AdjacencyMatrixAggregationBuilder::setFiltersAsList, KeyedFilter.PARSER, FILTERS_FIELD); + PARSER.declareBoolean( + AdjacencyMatrixAggregationBuilder::setShowOnlyIntersecting, + AdjacencyMatrixAggregationBuilder.SHOW_ONLY_INTERSECTING + ); } public static AggregationBuilder parse(XContentParser parser, String name) throws IOException { @@ -115,6 +123,7 @@ protected AdjacencyMatrixAggregationBuilder( super(clone, factoriesBuilder, metadata); this.filters = new ArrayList<>(clone.filters); this.separator = clone.separator; + this.showOnlyIntersecting = clone.showOnlyIntersecting; } @Override @@ -138,6 +147,40 @@ public AdjacencyMatrixAggregationBuilder(String name, String separator, Map filters, boolean showOnlyIntersecting) { + this(name, DEFAULT_SEPARATOR, filters, showOnlyIntersecting); + } + + /** + * @param name + * the name of this aggregation + * @param separator + * the string used to separate keys in intersections buckets e.g. + * & character for keyed filters A and B would return an + * intersection bucket named A&B + * @param filters + * the filters and their key to use with this aggregation. + * @param showOnlyIntersecting + * show only the buckets that intersection multiple documents + */ + public AdjacencyMatrixAggregationBuilder( + String name, + String separator, + Map filters, + boolean showOnlyIntersecting + ) { + this(name, separator, filters); + this.showOnlyIntersecting = showOnlyIntersecting; + } + /** * Read from a stream. 
*/ @@ -145,6 +188,9 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException { super(in); int filtersSize = in.readVInt(); separator = in.readString(); + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { + showOnlyIntersecting = in.readBoolean(); + } filters = new ArrayList<>(filtersSize); for (int i = 0; i < filtersSize; i++) { filters.add(new KeyedFilter(in)); @@ -155,6 +201,9 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(filters.size()); out.writeString(separator); + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { + out.writeBoolean(showOnlyIntersecting); + } for (KeyedFilter keyedFilter : filters) { keyedFilter.writeTo(out); } @@ -185,6 +234,11 @@ private AdjacencyMatrixAggregationBuilder setFiltersAsList(List fil return this; } + public AdjacencyMatrixAggregationBuilder setShowOnlyIntersecting(boolean showOnlyIntersecting) { + this.showOnlyIntersecting = showOnlyIntersecting; + return this; + } + /** * Set the separator used to join pairs of bucket keys */ @@ -214,6 +268,10 @@ public Map filters() { return result; } + public boolean isShowOnlyIntersecting() { + return showOnlyIntersecting; + } + @Override protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { boolean modified = false; @@ -224,7 +282,9 @@ protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryS rewrittenFilters.add(new KeyedFilter(kf.key(), rewritten)); } if (modified) { - return new AdjacencyMatrixAggregationBuilder(name).separator(separator).setFiltersAsList(rewrittenFilters); + return new AdjacencyMatrixAggregationBuilder(name).separator(separator) + .setFiltersAsList(rewrittenFilters) + .setShowOnlyIntersecting(showOnlyIntersecting); } return this; } @@ -245,7 +305,16 @@ protected AggregatorFactory doBuild(QueryShardContext queryShardContext, Aggrega + "] index level setting." 
); } - return new AdjacencyMatrixAggregatorFactory(name, filters, separator, queryShardContext, parent, subFactoriesBuilder, metadata); + return new AdjacencyMatrixAggregatorFactory( + name, + filters, + showOnlyIntersecting, + separator, + queryShardContext, + parent, + subFactoriesBuilder, + metadata + ); } @Override @@ -257,7 +326,8 @@ public BucketCardinality bucketCardinality() { protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(SEPARATOR_FIELD.getPreferredName(), separator); - builder.startObject(AdjacencyMatrixAggregator.FILTERS_FIELD.getPreferredName()); + builder.field(SHOW_ONLY_INTERSECTING.getPreferredName(), showOnlyIntersecting); + builder.startObject(FILTERS_FIELD.getPreferredName()); for (KeyedFilter keyedFilter : filters) { builder.field(keyedFilter.key(), keyedFilter.filter()); } @@ -268,7 +338,7 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param @Override public int hashCode() { - return Objects.hash(super.hashCode(), filters, separator); + return Objects.hash(super.hashCode(), filters, showOnlyIntersecting, separator); } @Override @@ -277,7 +347,9 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; AdjacencyMatrixAggregationBuilder other = (AdjacencyMatrixAggregationBuilder) obj; - return Objects.equals(filters, other.filters) && Objects.equals(separator, other.separator); + return Objects.equals(filters, other.filters) + && Objects.equals(separator, other.separator) + && Objects.equals(showOnlyIntersecting, other.showOnlyIntersecting); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index ef1795f425240..f82ee9dc242fb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.opensearch.common.lucene.Lucene; -import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -70,8 +69,6 @@ */ public class AdjacencyMatrixAggregator extends BucketsAggregator { - public static final ParseField FILTERS_FIELD = new ParseField("filters"); - /** * A keyed filter * @@ -145,6 +142,8 @@ public boolean equals(Object obj) { private final String[] keys; private final Weight[] filters; + + private final boolean showOnlyIntersecting; private final int totalNumKeys; private final int totalNumIntersections; private final String separator; @@ -155,6 +154,7 @@ public AdjacencyMatrixAggregator( String separator, String[] keys, Weight[] filters, + boolean showOnlyIntersecting, SearchContext context, Aggregator parent, Map metadata @@ -163,6 +163,7 @@ public AdjacencyMatrixAggregator( this.separator = separator; this.keys = keys; this.filters = filters; + this.showOnlyIntersecting = showOnlyIntersecting; this.totalNumIntersections = ((keys.length * keys.length) - keys.length) / 2; this.totalNumKeys = keys.length + totalNumIntersections; } @@ -177,10 +178,12 @@ public 
LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { - // Check each of the provided filters - for (int i = 0; i < bits.length; i++) { - if (bits[i].get(doc)) { - collectBucket(sub, doc, bucketOrd(bucket, i)); + if (!showOnlyIntersecting) { + // Check each of the provided filters + for (int i = 0; i < bits.length; i++) { + if (bits[i].get(doc)) { + collectBucket(sub, doc, bucketOrd(bucket, i)); + } } } // Check all the possible intersections of the provided filters @@ -229,7 +232,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I for (int i = 0; i < keys.length; i++) { long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], i); long docCount = bucketDocCount(bucketOrd); - // Empty buckets are not returned because this aggregation will commonly be used under a + // Empty buckets are not returned because this aggregation will commonly be used under // a date-histogram where we will look for transactions over time and can expect many // empty buckets. if (docCount > 0) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index 99ffb563ba2a8..bae86f3fcdfc1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -57,11 +57,14 @@ public class AdjacencyMatrixAggregatorFactory extends AggregatorFactory { private final String[] keys; private final Weight[] weights; + + private final boolean showOnlyIntersecting; private final String separator; public AdjacencyMatrixAggregatorFactory( String name, List filters, + boolean showOnlyIntersecting, String separator, QueryShardContext queryShardContext, AggregatorFactory parent, @@ -79,6 +82,7 @@ public AdjacencyMatrixAggregatorFactory( Query filter = keyedFilter.filter().toQuery(queryShardContext); weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } + this.showOnlyIntersecting = showOnlyIntersecting; } @Override @@ -88,7 +92,17 @@ public Aggregator createInternal( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, metadata); + return new AdjacencyMatrixAggregator( + name, + factories, + separator, + keys, + weights, + showOnlyIntersecting, + searchContext, + parent, + metadata + ); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index d862b2c2784de..41344fd06cbbc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -104,6 +104,6 @@ public String getStatsSubtype() { } public String getField() { - return config.fieldContext().field(); + return config.fieldContext() != null ? 
config.fieldContext().field() : null; } } diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index 78004d5086161..6f8ce5c2a252c 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -219,6 +219,15 @@ public int hashCode() { return Objects.hash(this.field, this.id, this.max); } + public boolean shardMatches(int shardOrdinal, int numShards) { + if (max >= numShards) { + // Slices are distributed over shards + return id % numShards == shardOrdinal; + } + // Shards are distributed over slices + return shardOrdinal % max == id; + } + /** * Converts this QueryBuilder to a lucene {@link Query}. * @@ -230,7 +239,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, throw new IllegalArgumentException("field " + field + " not found"); } - int shardId = request.shardId().id(); + int shardOrdinal = request.shardId().id(); int numShards = context.getIndexSettings().getNumberOfShards(); if ((request.preference() != null || request.indexRoutings().length > 0)) { GroupShardsIterator group = buildShardIterator(clusterService, request); @@ -246,21 +255,26 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, */ numShards = group.size(); int ord = 0; - shardId = -1; + shardOrdinal = -1; // remap the original shard id with its index (position) in the sorted shard iterator. for (ShardIterator it : group) { assert it.shardId().getIndex().equals(request.shardId().getIndex()); if (request.shardId().equals(it.shardId())) { - shardId = ord; + shardOrdinal = ord; break; } ++ord; } - assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing"; + assert shardOrdinal != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing"; } } - String field = this.field; + if (shardMatches(shardOrdinal, numShards) == false) { + // We should have already excluded this shard before routing to it. + // If we somehow land here, then we match nothing. + return new MatchNoDocsQuery("this shard is not part of the slice"); + } + boolean useTermQuery = false; if ("_uid".equals(field)) { // on new indices, the _id acts as a _uid @@ -291,12 +305,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, // the number of slices is greater than the number of shards // in such case we can reduce the number of requested shards by slice - // first we check if the slice is responsible of this shard int targetShard = id % numShards; - if (targetShard != shardId) { - // the shard is not part of this slice, we can skip it. - return new MatchNoDocsQuery("this shard is not part of the slice"); - } // compute the number of slices where this shard appears int numSlicesInShard = max / numShards; int rest = max % numShards; @@ -315,14 +324,8 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, ? new TermsSliceQuery(field, shardSlice, numSlicesInShard) : new DocValuesSliceQuery(field, shardSlice, numSlicesInShard); } - // the number of shards is greater than the number of slices + // the number of shards is greater than the number of slices. If we target this shard, we target all of it. - // check if the shard is assigned to the slice - int targetSlice = shardId % max; - if (id != targetSlice) { - // the shard is not part of this slice, we can skip it. 
- return new MatchNoDocsQuery("this shard is not part of the slice"); - } return new MatchAllDocsQuery(); } @@ -335,6 +338,8 @@ private GroupShardsIterator buildShardIterator(ClusterService clu Map> routingMap = request.indexRoutings().length > 0 ? Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings())) : null; + // Note that we do *not* want to filter this set of shard IDs based on the slice, since we want the + // full set of shards matched by the routing parameters. return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference()); } diff --git a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java index 10cc832fdb684..7f61b7cca3501 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java +++ b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java @@ -21,14 +21,18 @@ import org.apache.lucene.search.comparators.NumericComparator; import java.io.IOException; +import java.util.Comparator; /** - * Sorted numeric field for wider sort types, - * to help sorting two different numeric types. + * Sorted numeric field for wider sort types, to help sorting two different numeric types. + * NOTE: the unsigned_long is not supported by widening sort since the unsigned_long could not be used with other types * * @opensearch.internal */ public class SortedWiderNumericSortField extends SortedNumericSortField { + private final int byteCounts; + private final Comparator comparator; + /** * Creates a sort, possibly in reverse, specifying how the sort value from the document's set is * selected. @@ -39,6 +43,15 @@ public class SortedWiderNumericSortField extends SortedNumericSortField { */ public SortedWiderNumericSortField(String field, Type type, boolean reverse) { super(field, type, reverse); + if (type == Type.LONG) { + byteCounts = Long.BYTES; + comparator = Comparator.comparingLong(Number::longValue); + } else if (type == Type.DOUBLE) { + byteCounts = Double.BYTES; + comparator = Comparator.comparingDouble(Number::doubleValue); + } else { + throw new IllegalArgumentException("Unsupported numeric type: " + type); + } } /** @@ -51,7 +64,7 @@ public SortedWiderNumericSortField(String field, Type type, boolean reverse) { */ @Override public FieldComparator getComparator(int numHits, Pruning pruning) { - return new NumericComparator(getField(), (Number) getMissingValue(), getReverse(), pruning, Double.BYTES) { + return new NumericComparator(getField(), (Number) getMissingValue(), getReverse(), pruning, byteCounts) { @Override public int compare(int slot1, int slot2) { throw new UnsupportedOperationException(); @@ -78,7 +91,7 @@ public int compareValues(Number first, Number second) { } else if (second == null) { return 1; } else { - return Double.compare(first.doubleValue(), second.doubleValue()); + return comparator.compare(first, second); } } }; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 1b189da13e92e..26f5cd51573a0 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.opensearch.cluster.SnapshotsInProgress.ShardState; import 
org.opensearch.cluster.SnapshotsInProgress.State; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; @@ -63,6 +64,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.IndexShardSnapshotStatus.Stage; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -75,7 +77,6 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -377,7 +378,9 @@ private void snapshot( ActionListener listener ) { try { - final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShardOrNull(shardId.id()); + final boolean closedIndex = indexService.getMetadata().getState() == IndexMetadata.State.CLOSE; if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } @@ -404,24 +407,42 @@ private void snapshot( if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); long primaryTerm = indexShard.getOperationPrimaryTerm(); - // we flush first to make sure we get the latest writes snapshotted - wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); - long commitGeneration = snapshotIndexCommit.getGeneration(); + long commitGeneration = 0L; + Map indexFilesToFileLengthMap = null; + IndexCommit snapshotIndexCommit = null; + try { + if (closedIndex) { + RemoteSegmentMetadata lastRemoteUploadedIndexCommit = indexShard.fetchLastRemoteUploadedSegmentMetadata(); + indexFilesToFileLengthMap = lastRemoteUploadedIndexCommit.getMetadata() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getLength())); + primaryTerm = lastRemoteUploadedIndexCommit.getPrimaryTerm(); + commitGeneration = lastRemoteUploadedIndexCommit.getGeneration(); + } else { + wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + } indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); - } catch (NoSuchFileException e) { - wrappedSnapshot.close(); - logger.warn( - "Exception while acquiring lock on primaryTerm = {} and generation = {}", - primaryTerm, - commitGeneration - ); - indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); - wrappedSnapshot = indexShard.acquireLastIndexCommit(false); - snapshotIndexCommit = wrappedSnapshot.get(); - commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (IOException e) { + if (closedIndex) { + logger.warn("Exception while reading latest metadata file from remote store"); + 
listener.onFailure(e); + } else { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } } try { repository.snapshotRemoteStoreIndexShard( @@ -429,11 +450,13 @@ private void snapshot( snapshot.getSnapshotId(), indexId, snapshotIndexCommit, - getShardStateId(indexShard, snapshotIndexCommit), + null, snapshotStatus, primaryTerm, + commitGeneration, startTime, - ActionListener.runBefore(listener, wrappedSnapshot::close) + indexFilesToFileLengthMap, + closedIndex ? listener : ActionListener.runBefore(listener, wrappedSnapshot::close) ); } catch (IndexShardSnapshotFailedException e) { logger.error( diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index ae1711012383b..97c1089330b97 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -198,7 +198,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); map.put(Names.INDEX_SEARCHER, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE); - map.put(Names.REMOTE_STATE_READ, ThreadPoolType.SCALING); + map.put(Names.REMOTE_STATE_READ, ThreadPoolType.FIXED); map.put(Names.REMOTE_STATE_CHECKSUM, ThreadPoolType.FIXED); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -317,7 +317,7 @@ public ThreadPool( ); builders.put( Names.REMOTE_STATE_READ, - new ScalingExecutorBuilder(Names.REMOTE_STATE_READ, 1, boundedBy(4 * allocatedProcessors, 4, 32), TimeValue.timeValueMinutes(5)) + new FixedExecutorBuilder(settings, Names.REMOTE_STATE_READ, boundedBy(4 * allocatedProcessors, 4, 32), 120000) ); builders.put( Names.INDEX_SEARCHER, diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index f56cd146ce953..f80a29872a78d 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -521,38 +521,8 @@ private BoundTransportAddress createBoundTransportAddress(ProfileSettings profil throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new BoundTransportAddress(transportBoundAddresses, publishAddress); - } - - // package private for tests - static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = profileSettings.publishPort; - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || 
boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { + final int publishPort = Transport.resolvePublishPort(profileSettings.publishPort, boundAddresses, publishInetAddress); + if (publishPort == -1) { String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; throw new BindTransportException( "Failed to auto-resolve publish port" @@ -568,7 +538,9 @@ static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { + if (publishPort < 0) { + for (InetSocketAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + if (publishPort < 0) { + final Set ports = new HashSet<>(); + for (InetSocketAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next(); + } + } + + return publishPort; + } + + static int resolveTransportPublishPort(int publishPort, List boundAddresses, InetAddress publishInetAddress) { + return Transport.resolvePublishPort( + publishPort, + boundAddresses.stream().map(TransportAddress::address).collect(Collectors.toList()), + publishInetAddress + ); + } + /** * A unidirectional connection to a {@link DiscoveryNode} * diff --git a/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java new file mode 100644 index 0000000000000..ea455d607f058 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java @@ -0,0 +1,595 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.action.admin.indices.replication;
+
+import org.opensearch.Version;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlock;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.block.ClusterBlocks;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.routing.AllocationId;
+import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.cluster.routing.ShardIterator;
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.ShardsIterator;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.support.DefaultShardOperationFailedException;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.SegmentReplicationPerGroupStats;
+import org.opensearch.index.SegmentReplicationPressureService;
+import org.opensearch.index.SegmentReplicationShardStats;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.replication.SegmentReplicationState;
+import org.opensearch.indices.replication.SegmentReplicationTargetService;
+import org.opensearch.indices.replication.common.ReplicationLuceneIndex;
+import org.opensearch.indices.replication.common.ReplicationTimer;
+import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.transport.TransportService;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class TransportSegmentReplicationStatsActionTests extends OpenSearchTestCase {
+    @Mock
+    private ClusterService clusterService;
+    @Mock
+    private TransportService transportService;
+    @Mock
+    private IndicesService indicesService;
+    @Mock
+    private SegmentReplicationTargetService targetService;
+    @Mock
+    private ActionFilters actionFilters;
+    @Mock
+    private IndexNameExpressionResolver indexNameExpressionResolver;
+    @Mock
+    private SegmentReplicationPressureService pressureService;
+    @Mock
+    private IndexShard indexShard;
+    @Mock
+    private IndexService indexService;
+
+    private TransportSegmentReplicationStatsAction action;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.openMocks(this);
+        super.setUp();
+        action = new TransportSegmentReplicationStatsAction(
+            clusterService,
+            transportService,
+            indicesService,
+            targetService,
+            actionFilters,
+            indexNameExpressionResolver,
+            pressureService
+        );
+    }
+
+    public void testShardReturnsAllTheShardsForTheIndex() {
+        SegmentReplicationStatsRequest segmentReplicationStatsRequest = mock(SegmentReplicationStatsRequest.class);
+        String[] concreteIndices = new String[] { "test-index" };
+        ClusterState clusterState = mock(ClusterState.class);
+        RoutingTable routingTables = mock(RoutingTable.class);
+        ShardsIterator shardsIterator = mock(ShardIterator.class);
+
+        when(clusterState.routingTable()).thenReturn(routingTables);
+        when(routingTables.allShardsIncludingRelocationTargets(any())).thenReturn(shardsIterator);
+        assertEquals(shardsIterator, action.shards(clusterState, segmentReplicationStatsRequest, concreteIndices));
+    }
+
+    public void testShardOperationWithPrimaryShard() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest();
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.primary()).thenReturn(true);
+        when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService);
+        when(indexService.getShard(shardId.id())).thenReturn(indexShard);
+        when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled());
+
+        SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting);
+
+        assertNotNull(response);
+        verify(pressureService).getStatsForShard(any());
+    }
+
+    public void testShardOperationWithReplicaShard() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest();
+        request.activeOnly(false);
+        SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class);
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.primary()).thenReturn(false);
+        when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService);
+        when(indexService.getShard(shardId.id())).thenReturn(indexShard);
+        when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled());
+        when(targetService.getSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState);
+
+        SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting);
+
+        assertNotNull(response);
+        assertNull(response.getPrimaryStats());
+        assertNotNull(response.getReplicaStats());
+        verify(targetService).getSegmentReplicationState(shardId);
+    }
+
+    public void testShardOperationWithReplicaShardActiveOnly() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest();
+        request.activeOnly(true);
+        SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class);
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.primary()).thenReturn(false);
+        when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService);
+        when(indexService.getShard(shardId.id())).thenReturn(indexShard);
+        when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled());
+        when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState);
+
+        SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting);
+
+        assertNotNull(response);
+        assertNull(response.getPrimaryStats());
+        assertNotNull(response.getReplicaStats());
+        verify(targetService).getOngoingEventSegmentReplicationState(shardId);
+    }
+
+    public void testComputeBytesRemainingToReplicateWhenCompletedAndOngoingStateNotNull() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class);
+        SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        AllocationId allocationId = AllocationId.newInitializing();
+        ReplicationTimer replicationTimerCompleted = mock(ReplicationTimer.class);
+        ReplicationTimer replicationTimerOngoing = mock(ReplicationTimer.class);
+        long time1 = 10;
+        long time2 = 15;
+        ReplicationLuceneIndex replicationLuceneIndex = new ReplicationLuceneIndex();
+        replicationLuceneIndex.addFileDetail("name1", 10, false);
+        replicationLuceneIndex.addFileDetail("name2", 15, false);
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.allocationId()).thenReturn(allocationId);
+        when(targetService.getlatestCompletedEventSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState);
+        when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState);
+        when(completedSegmentReplicationState.getTimer()).thenReturn(replicationTimerCompleted);
+        when(onGoingSegmentReplicationState.getTimer()).thenReturn(replicationTimerOngoing);
+        when(replicationTimerOngoing.time()).thenReturn(time1);
+        when(replicationTimerCompleted.time()).thenReturn(time2);
+        when(onGoingSegmentReplicationState.getIndex()).thenReturn(replicationLuceneIndex);
+
+        SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting);
+
+        assertNotNull(segmentReplicationShardStats);
+        assertEquals(25, segmentReplicationShardStats.getBytesBehindCount());
+        assertEquals(10, segmentReplicationShardStats.getCurrentReplicationLagMillis());
+        assertEquals(15, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis());
+
+        verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId);
+        verify(targetService).getOngoingEventSegmentReplicationState(shardId);
+    }
+
+    public void testCalculateBytesRemainingToReplicateWhenNoCompletedState() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        AllocationId allocationId = AllocationId.newInitializing();
+        ReplicationTimer replicationTimerOngoing = mock(ReplicationTimer.class);
+        long time1 = 10;
+        ReplicationLuceneIndex replicationLuceneIndex = new ReplicationLuceneIndex();
+        replicationLuceneIndex.addFileDetail("name1", 10, false);
+        replicationLuceneIndex.addFileDetail("name2", 15, false);
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.allocationId()).thenReturn(allocationId);
+        when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState);
+        when(onGoingSegmentReplicationState.getTimer()).thenReturn(replicationTimerOngoing);
+        when(replicationTimerOngoing.time()).thenReturn(time1);
+        when(onGoingSegmentReplicationState.getIndex()).thenReturn(replicationLuceneIndex);
+
+        SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting);
+
+        assertNotNull(segmentReplicationShardStats);
+        assertEquals(25, segmentReplicationShardStats.getBytesBehindCount());
+        assertEquals(10, segmentReplicationShardStats.getCurrentReplicationLagMillis());
+        assertEquals(0, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis());
+
+        verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId);
+        verify(targetService).getOngoingEventSegmentReplicationState(shardId);
+    }
+
+    public void testCalculateBytesRemainingToReplicateWhenNoOnGoingState() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        AllocationId allocationId = AllocationId.newInitializing();
+        ReplicationTimer replicationTimerCompleted = mock(ReplicationTimer.class);
+        long time2 = 15;
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.allocationId()).thenReturn(allocationId);
+        when(targetService.getlatestCompletedEventSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState);
+        when(completedSegmentReplicationState.getTimer()).thenReturn(replicationTimerCompleted);
+        when(replicationTimerCompleted.time()).thenReturn(time2);
+
+        SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting);
+
+        assertNotNull(segmentReplicationShardStats);
+        assertEquals(0, segmentReplicationShardStats.getBytesBehindCount());
+        assertEquals(0, segmentReplicationShardStats.getCurrentReplicationLagMillis());
+        assertEquals(15, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis());
+
+        verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId);
+        verify(targetService).getOngoingEventSegmentReplicationState(shardId);
+    }
+
+    public void testCalculateBytesRemainingToReplicateWhenNoCompletedAndOngoingState() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        AllocationId allocationId = AllocationId.newInitializing();
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(shardRouting.allocationId()).thenReturn(allocationId);
+
+        SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting);
+
+        assertNotNull(segmentReplicationShardStats);
+        assertEquals(0, segmentReplicationShardStats.getBytesBehindCount());
+        assertEquals(0, segmentReplicationShardStats.getCurrentReplicationLagMillis());
+        assertEquals(0, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis());
+
+        verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId);
+        verify(targetService).getOngoingEventSegmentReplicationState(shardId);
+    }
+
+    public void testNewResponseWhenAllReplicasReturnResponseCombinesTheResults() {
+        SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest();
+        List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>();
+        String[] shards = { "0", "1" };
+        request.shards(shards);
+
+        int totalShards = 6;
+        int successfulShards = 6;
+        int failedShard = 0;
+        String allocIdOne = "allocIdOne";
+        String allocIdTwo = "allocIdTwo";
+        String allocIdThree = "allocIdThree";
+        String allocIdFour = "allocIdFour";
+        String allocIdFive = "allocIdFive";
+        String allocIdSix = "allocIdSix";
+
+        ShardId shardId0 = mock(ShardId.class);
+        ShardRouting primary0 = mock(ShardRouting.class);
+        ShardRouting replica0 = mock(ShardRouting.class);
+        ShardRouting searchReplica0 = mock(ShardRouting.class);
+
+        ShardId shardId1 = mock(ShardId.class);
+        ShardRouting primary1 = mock(ShardRouting.class);
+        ShardRouting replica1 = mock(ShardRouting.class);
+        ShardRouting searchReplica1 = mock(ShardRouting.class);
+
+        when(shardId0.getId()).thenReturn(0);
+        when(shardId0.getIndexName()).thenReturn("test-index-1");
+        when(primary0.shardId()).thenReturn(shardId0);
+        when(replica0.shardId()).thenReturn(shardId0);
+        when(searchReplica0.shardId()).thenReturn(shardId0);
+
+        when(shardId1.getId()).thenReturn(1);
+        when(shardId1.getIndexName()).thenReturn("test-index-1");
+        when(primary1.shardId()).thenReturn(shardId1);
+        when(replica1.shardId()).thenReturn(shardId1);
+        when(searchReplica1.shardId()).thenReturn(shardId1);
+
+        AllocationId allocationIdOne = mock(AllocationId.class);
+        AllocationId allocationIdTwo = mock(AllocationId.class);
+        AllocationId allocationIdThree = mock(AllocationId.class);
+        AllocationId allocationIdFour = mock(AllocationId.class);
+        AllocationId allocationIdFive = mock(AllocationId.class);
+        AllocationId allocationIdSix = mock(AllocationId.class);
+
+        when(allocationIdOne.getId()).thenReturn(allocIdOne);
+        when(allocationIdTwo.getId()).thenReturn(allocIdTwo);
+        when(allocationIdThree.getId()).thenReturn(allocIdThree);
+        when(allocationIdFour.getId()).thenReturn(allocIdFour);
+        when(allocationIdFive.getId()).thenReturn(allocIdFive);
+        when(allocationIdSix.getId()).thenReturn(allocIdSix);
+        when(primary0.allocationId()).thenReturn(allocationIdOne);
+        when(replica0.allocationId()).thenReturn(allocationIdTwo);
+        when(searchReplica0.allocationId()).thenReturn(allocationIdThree);
+        when(primary1.allocationId()).thenReturn(allocationIdFour);
+        when(replica1.allocationId()).thenReturn(allocationIdFive);
+        when(searchReplica1.allocationId()).thenReturn(allocationIdSix);
+
+        when(primary0.isSearchOnly()).thenReturn(false);
+        when(replica0.isSearchOnly()).thenReturn(false);
+        when(searchReplica0.isSearchOnly()).thenReturn(true);
+        when(primary1.isSearchOnly()).thenReturn(false);
+        when(replica1.isSearchOnly()).thenReturn(false);
+        when(searchReplica1.isSearchOnly()).thenReturn(true);
+
+        Set<SegmentReplicationShardStats> segmentReplicationShardStats0 = new HashSet<>();
+        SegmentReplicationShardStats segmentReplicationShardStatsOfReplica0 = new SegmentReplicationShardStats(allocIdTwo, 0, 0, 0, 0, 0);
+        segmentReplicationShardStats0.add(segmentReplicationShardStatsOfReplica0);
+
+        Set<SegmentReplicationShardStats> segmentReplicationShardStats1 = new HashSet<>();
+        SegmentReplicationShardStats segmentReplicationShardStatsOfReplica1 = new SegmentReplicationShardStats(allocIdFive, 0, 0, 0, 0, 0);
+        segmentReplicationShardStats1.add(segmentReplicationShardStatsOfReplica1);
+
+        SegmentReplicationPerGroupStats segmentReplicationPerGroupStats0 = new SegmentReplicationPerGroupStats(
+            shardId0,
+            segmentReplicationShardStats0,
+            0
+        );
+
+        SegmentReplicationPerGroupStats segmentReplicationPerGroupStats1 = new SegmentReplicationPerGroupStats(
+            shardId1,
+            segmentReplicationShardStats1,
+            0
+        );
+
+        SegmentReplicationState segmentReplicationState0 = mock(SegmentReplicationState.class);
+        SegmentReplicationState searchReplicaSegmentReplicationState0 = mock(SegmentReplicationState.class);
+        SegmentReplicationState segmentReplicationState1 = mock(SegmentReplicationState.class);
+        SegmentReplicationState searchReplicaSegmentReplicationState1 = mock(SegmentReplicationState.class);
+
+        when(segmentReplicationState0.getShardRouting()).thenReturn(replica0);
+        when(searchReplicaSegmentReplicationState0.getShardRouting()).thenReturn(searchReplica0);
+        when(segmentReplicationState1.getShardRouting()).thenReturn(replica1);
+        when(searchReplicaSegmentReplicationState1.getShardRouting()).thenReturn(searchReplica1);
+
+        List<SegmentReplicationShardStatsResponse> responses = List.of(
+            new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats0),
+            new SegmentReplicationShardStatsResponse(segmentReplicationState0),
+            new SegmentReplicationShardStatsResponse(searchReplicaSegmentReplicationState0),
+            new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats1),
+            new SegmentReplicationShardStatsResponse(segmentReplicationState1),
+            new SegmentReplicationShardStatsResponse(searchReplicaSegmentReplicationState1)
+        );
+
+        SegmentReplicationStatsResponse response = action.newResponse(
+            request,
+            totalShards,
+            successfulShards,
+            failedShard,
+            responses,
+            shardFailures,
+            ClusterState.EMPTY_STATE
+        );
+
+        List<SegmentReplicationPerGroupStats> responseStats = response.getReplicationStats().get("test-index-1");
+        SegmentReplicationPerGroupStats primStats0 = responseStats.get(0);
+        Set<SegmentReplicationShardStats> replicaStats0 = primStats0.getReplicaStats();
+        assertEquals(2, replicaStats0.size());
+        for (SegmentReplicationShardStats replicaStat : replicaStats0) {
+            if (replicaStat.getAllocationId().equals(allocIdTwo)) {
+                assertEquals(segmentReplicationState0, replicaStat.getCurrentReplicationState());
+            }
+
+            if (replicaStat.getAllocationId().equals(allocIdThree)) {
+                assertEquals(searchReplicaSegmentReplicationState0, replicaStat.getCurrentReplicationState());
+            }
+        }
+
+        SegmentReplicationPerGroupStats primStats1 = responseStats.get(1);
+        Set<SegmentReplicationShardStats> replicaStats1 = primStats1.getReplicaStats();
+        assertEquals(2, replicaStats1.size());
+        for (SegmentReplicationShardStats replicaStat : replicaStats1) {
+            if (replicaStat.getAllocationId().equals(allocIdFive)) {
+                assertEquals(segmentReplicationState1, replicaStat.getCurrentReplicationState());
+            }
+
+            if (replicaStat.getAllocationId().equals(allocIdSix)) {
+                assertEquals(searchReplicaSegmentReplicationState1, replicaStat.getCurrentReplicationState());
+            }
+        }
+    }
+
+    public void testNewResponseWhenShardsToFetchEmptyAndResponsesContainsNull() {
+        SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest();
+        List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>();
+        String[] shards = {};
+        request.shards(shards);
+
+        int totalShards = 3;
+        int successfulShards = 3;
+        int failedShard = 0;
+        String allocIdOne = "allocIdOne";
+        String allocIdTwo = "allocIdTwo";
+        ShardId shardIdOne = mock(ShardId.class);
+        ShardId shardIdTwo = mock(ShardId.class);
+        ShardId shardIdThree = mock(ShardId.class);
+        ShardRouting shardRoutingOne = mock(ShardRouting.class);
+        ShardRouting shardRoutingTwo = mock(ShardRouting.class);
+        ShardRouting shardRoutingThree = mock(ShardRouting.class);
+        when(shardIdOne.getId()).thenReturn(1);
+        when(shardIdTwo.getId()).thenReturn(2);
+        when(shardIdThree.getId()).thenReturn(3);
+        when(shardRoutingOne.shardId()).thenReturn(shardIdOne);
+        when(shardRoutingTwo.shardId()).thenReturn(shardIdTwo);
+        when(shardRoutingThree.shardId()).thenReturn(shardIdThree);
+        AllocationId allocationId = mock(AllocationId.class);
+        when(allocationId.getId()).thenReturn(allocIdOne);
+        when(shardRoutingTwo.allocationId()).thenReturn(allocationId);
+        when(shardIdOne.getIndexName()).thenReturn("test-index");
+
+        Set<SegmentReplicationShardStats> segmentReplicationShardStats = new HashSet<>();
+        SegmentReplicationShardStats segmentReplicationShardStatsOfReplica = new SegmentReplicationShardStats(allocIdOne, 0, 0, 0, 0, 0);
+        segmentReplicationShardStats.add(segmentReplicationShardStatsOfReplica);
+        SegmentReplicationPerGroupStats segmentReplicationPerGroupStats = new SegmentReplicationPerGroupStats(
+            shardIdOne,
+            segmentReplicationShardStats,
+            0
+        );
+
+        SegmentReplicationState segmentReplicationState = mock(SegmentReplicationState.class);
+        SegmentReplicationShardStats segmentReplicationShardStatsFromSearchReplica = mock(SegmentReplicationShardStats.class);
+        when(segmentReplicationShardStatsFromSearchReplica.getAllocationId()).thenReturn("alloc2");
+        when(segmentReplicationState.getShardRouting()).thenReturn(shardRoutingTwo);
+
+        List<SegmentReplicationShardStatsResponse> responses = new ArrayList<>();
+        responses.add(null);
+        responses.add(new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats));
+        responses.add(new SegmentReplicationShardStatsResponse(segmentReplicationState));
+
+        SegmentReplicationStatsResponse response = action.newResponse(
+            request,
+            totalShards,
+            successfulShards,
+            failedShard,
+            responses,
+            shardFailures,
+            ClusterState.EMPTY_STATE
+        );
+
+        List<SegmentReplicationPerGroupStats> responseStats = response.getReplicationStats().get("test-index");
+        SegmentReplicationPerGroupStats primStats = responseStats.get(0);
+        Set<SegmentReplicationShardStats> segRpShardStatsSet = primStats.getReplicaStats();
+
+        for (SegmentReplicationShardStats segRpShardStats : segRpShardStatsSet) {
+            if (segRpShardStats.getAllocationId().equals(allocIdOne)) {
+                assertEquals(segmentReplicationState, segRpShardStats.getCurrentReplicationState());
+            }
+
+            if (segRpShardStats.getAllocationId().equals(allocIdTwo)) {
+                assertEquals(segmentReplicationShardStatsFromSearchReplica, segRpShardStats);
+            }
+        }
+    }
+
+    public void testShardOperationWithSegRepDisabled() {
+        ShardRouting shardRouting = mock(ShardRouting.class);
+        ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0);
+        SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest();
+
+        when(shardRouting.shardId()).thenReturn(shardId);
+        when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService);
+        when(indexService.getShard(shardId.id())).thenReturn(indexShard);
+        when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepDisabled());
+
+        SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting);
+
+        assertNull(response);
+    }
+
+    public void testGlobalBlockCheck() {
+        ClusterBlock writeClusterBlock = new ClusterBlock(
+            1,
+            "uuid",
+            "",
+            true,
+            true,
+            true,
+            RestStatus.OK,
+            EnumSet.of(ClusterBlockLevel.METADATA_WRITE)
+        );
+
+        ClusterBlock readClusterBlock = new ClusterBlock(
+            1,
+            "uuid",
+            "",
+            true,
+            true,
+            true,
+            RestStatus.OK,
+            EnumSet.of(ClusterBlockLevel.METADATA_READ)
+        );
+
+        ClusterBlocks.Builder builder = ClusterBlocks.builder();
+        builder.addGlobalBlock(writeClusterBlock);
+        ClusterState metadataWriteBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build();
+        assertNull(action.checkGlobalBlock(metadataWriteBlockedState, new SegmentReplicationStatsRequest()));
+
+        builder = ClusterBlocks.builder();
+        builder.addGlobalBlock(readClusterBlock);
+        ClusterState metadataReadBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build();
+        assertNotNull(action.checkGlobalBlock(metadataReadBlockedState, new SegmentReplicationStatsRequest()));
+    }
+
+    public void testIndexBlockCheck() {
+        ClusterBlock writeClusterBlock = new ClusterBlock(
+            1,
+            "uuid",
+            "",
+            true,
+            true,
+            true,
+            RestStatus.OK,
+            EnumSet.of(ClusterBlockLevel.METADATA_WRITE)
+        );
+
+        ClusterBlock readClusterBlock = new ClusterBlock(
+            1,
+            "uuid",
+            "",
+            true,
+            true,
+            true,
+            RestStatus.OK,
+            EnumSet.of(ClusterBlockLevel.METADATA_READ)
+        );
+
+        String indexName = "test";
+        ClusterBlocks.Builder builder = ClusterBlocks.builder();
+        builder.addIndexBlock(indexName, writeClusterBlock);
+        ClusterState metadataWriteBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build();
+        assertNull(action.checkRequestBlock(metadataWriteBlockedState, new SegmentReplicationStatsRequest(), new String[] { indexName }));
+
+        builder = ClusterBlocks.builder();
+        builder.addIndexBlock(indexName, readClusterBlock);
+        ClusterState metadataReadBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build();
+        assertNotNull(action.checkRequestBlock(metadataReadBlockedState, new SegmentReplicationStatsRequest(), new String[] { indexName }));
+    }
+
+    private IndexSettings createIndexSettingsWithSegRepEnabled() {
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+            .build();
+
+        return new IndexSettings(IndexMetadata.builder("test").settings(settings).build(), settings);
+    }
+
+    private IndexSettings createIndexSettingsWithSegRepDisabled() {
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)
+            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+            .build();
+        return new IndexSettings(IndexMetadata.builder("test").settings(settings).build(), settings);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java
index 84955d01a59ce..0a0015ae8cbf6 100644
--- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java
+++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java
@@ -809,6 +809,7 @@ public void testCollectSearchShards() throws Exception {
             remoteIndicesByCluster,
             remoteClusterService,
             threadPool,
+            null,
             new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)
         );
         awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -835,6 +836,7 @@ public void testCollectSearchShards() throws Exception {
             remoteIndicesByCluster,
             remoteClusterService,
             threadPool,
+            null,
             new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)
         );
         awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -880,6 +882,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             remoteIndicesByCluster,
             remoteClusterService,
             threadPool,
+            null,
             new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)
         );
         awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -907,6 +910,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             remoteIndicesByCluster,
             remoteClusterService,
             threadPool,
+            null,
             new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)
         );
         awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -949,6 +953,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             remoteIndicesByCluster,
             remoteClusterService,
             threadPool,
+            null,
             new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)
        );
        awaitLatch(latch, 5, TimeUnit.SECONDS);
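Taken together, the bytes-behind assertions in the stats tests above pin down a simple arithmetic: bytes behind is the sum of the pending file sizes in the ongoing event's ReplicationLuceneIndex, the current replication lag comes from the ongoing event's timer, and the last-completed time comes from the completed event's timer, with each term falling back to zero when the corresponding state is absent. Below is a minimal, self-contained sketch of that arithmetic; the class and method names are illustrative, not the production TransportSegmentReplicationStatsAction.

import java.util.Map;

// Illustrative sketch only: mirrors the values the tests assert, not the production code path.
public class ReplicationStatsSketch {

    record Stats(long bytesBehind, long currentLagMillis, long lastCompletedMillis) {}

    // completedTimeMillis / ongoingTimeMillis are null when no completed / ongoing event exists.
    static Stats compute(Long completedTimeMillis, Long ongoingTimeMillis, Map<String, Long> pendingFileSizes) {
        // Bytes behind: total size of files the ongoing replication event still has to copy.
        long bytesBehind = ongoingTimeMillis == null ? 0L : pendingFileSizes.values().stream().mapToLong(Long::longValue).sum();
        long lag = ongoingTimeMillis == null ? 0L : ongoingTimeMillis;
        long lastCompleted = completedTimeMillis == null ? 0L : completedTimeMillis;
        return new Stats(bytesBehind, lag, lastCompleted);
    }

    public static void main(String[] args) {
        // Same fixture as the tests: two pending files of 10 and 15 bytes, timers of 10 and 15 ms.
        System.out.println(compute(15L, 10L, Map.of("name1", 10L, "name2", 15L))); // bytesBehind=25, lag=10, lastCompleted=15
        System.out.println(compute(null, 10L, Map.of("name1", 10L, "name2", 15L))); // bytesBehind=25, lag=10, lastCompleted=0
        System.out.println(compute(15L, null, Map.of()));                           // bytesBehind=0, lag=0, lastCompleted=15
    }
}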
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
index 8ee944646a413..005805bca45b8 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
@@ -55,6 +55,7 @@
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.node.remotestore.RemoteStoreNodeService;
 import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.RepositoryMissingException;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
@@ -1463,6 +1464,73 @@ public void testJoinRemoteStoreClusterWithRemotePublicationNodeInMixedMode() {
         JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata());
     }
 
+    public void testUpdatesClusterStateWithRepositoryMetadataNotInSync() throws Exception {
+        Map<String, String> newNodeAttributes = new HashMap<>();
+        newNodeAttributes.putAll(remoteStateNodeAttributes(CLUSTER_STATE_REPO));
+        newNodeAttributes.putAll(remoteRoutingTableAttributes(ROUTING_TABLE_REPO));
+
+        final AllocationService allocationService = mock(AllocationService.class);
+        when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]);
+        final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null);
+        RepositoriesService repositoriesService = mock(RepositoriesService.class);
+        when(repositoriesService.repository(any())).thenThrow(RepositoryMissingException.class);
+        final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, null);
+
+        final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(
+            Settings.EMPTY,
+            allocationService,
+            logger,
+            rerouteService,
+            null,
+            remoteStoreNodeService
+        );
+
+        final DiscoveryNode clusterManagerNode = new DiscoveryNode(
+            UUIDs.base64UUID(),
+            buildNewFakeTransportAddress(),
+            newNodeAttributes,
+            DiscoveryNodeRole.BUILT_IN_ROLES,
+            Version.CURRENT
+        );
+
+        final RepositoryMetadata clusterStateRepo = buildRepositoryMetadata(clusterManagerNode, CLUSTER_STATE_REPO);
+        final RepositoryMetadata routingTableRepo = buildRepositoryMetadata(clusterManagerNode, ROUTING_TABLE_REPO);
+        List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>() {
+            {
+                add(clusterStateRepo);
+                add(routingTableRepo);
+            }
+        };
+
+        final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .nodes(
+                DiscoveryNodes.builder()
+                    .add(clusterManagerNode)
+                    .localNodeId(clusterManagerNode.getId())
+                    .clusterManagerNodeId(clusterManagerNode.getId())
+            )
+            .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)))
+            .build();
+
+        final DiscoveryNode joiningNode = new DiscoveryNode(
+            UUIDs.base64UUID(),
+            buildNewFakeTransportAddress(),
+            newNodeAttributes,
+            DiscoveryNodeRole.BUILT_IN_ROLES,
+            Version.CURRENT
+        );
+
+        final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute(
+            clusterState,
+            List.of(new JoinTaskExecutor.Task(joiningNode, "test"))
+        );
+        assertThat(result.executionResults.entrySet(), hasSize(1));
+        final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next();
+        assertTrue(taskResult.isSuccess());
+        validatePublicationRepositoryMetadata(result.resultingState, clusterManagerNode);
+
+    }
+
     private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) throws Exception {
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java
index f9968ca08ebba..6dbda650a2f75 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java
@@ -32,10 +32,13 @@
 
 package org.opensearch.cluster.metadata;
 
+import org.opensearch.Version;
 import org.opensearch.action.admin.indices.rollover.MaxAgeCondition;
 import org.opensearch.action.admin.indices.rollover.MaxDocsCondition;
 import org.opensearch.action.admin.indices.rollover.MaxSizeCondition;
 import org.opensearch.action.admin.indices.rollover.RolloverInfo;
+import org.opensearch.cluster.Diff;
+import org.opensearch.common.UUIDs;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
@@ -47,6 +50,7 @@
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
@@ -88,6 +92,26 @@ protected NamedXContentRegistry xContentRegistry() {
         return new NamedXContentRegistry(IndicesModule.getNamedXContents());
     }
 
+    // Create the index metadata for a given index, with the specified version.
+    private static IndexMetadata createIndexMetadata(final Index index, final long version) {
+        return createIndexMetadata(index, version, false);
+    }
+
+    private static IndexMetadata createIndexMetadata(final Index index, final long version, final boolean isSystem) {
+        final Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
+            .build();
+        return IndexMetadata.builder(index.getName())
+            .settings(settings)
+            .numberOfShards(1)
+            .numberOfReplicas(0)
+            .creationDate(System.currentTimeMillis())
+            .version(version)
+            .system(isSystem)
+            .build();
+    }
+
     public void testIndexMetadataSerialization() throws IOException {
         Integer numShard = randomFrom(1, 2, 4, 8, 16);
         int numberOfReplicas = randomIntBetween(0, 10);
@@ -568,4 +592,18 @@ public void testParseIndexNameCannotFormatNumber() {
         }
     }
 
+    /**
+     * Test that changes to indices metadata are applied
+     */
+    public void testIndicesMetadataDiffSystemFlagFlipped() {
+        String indexUuid = UUIDs.randomBase64UUID();
+        Index index = new Index("test-index", indexUuid);
+        IndexMetadata previousIndexMetadata = createIndexMetadata(index, 1);
+        IndexMetadata nextIndexMetadata = createIndexMetadata(index, 2, true);
+        Diff<IndexMetadata> diff = new IndexMetadata.IndexMetadataDiff(previousIndexMetadata, nextIndexMetadata);
+        IndexMetadata indexMetadataAfterDiffApplied = diff.apply(previousIndexMetadata);
+        assertTrue(indexMetadataAfterDiffApplied.isSystem());
+        assertThat(indexMetadataAfterDiffApplied.getVersion(), equalTo(nextIndexMetadata.getVersion()));
+    }
+
 }
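The new test above exercises the cluster-state Diff contract: a diff built from a (previous, next) pair must, when applied to previous, reproduce next's changed fields, here including the system flag. The toy standalone analogue below shows just that apply pattern; the real IndexMetadataDiff tracks many more fields, and the names here are illustrative.

// Toy analogue of the Diff apply pattern exercised by testIndicesMetadataDiffSystemFlagFlipped.
public class DiffSketch {

    record Meta(String name, long version, boolean system) {}

    // A diff captures the target values needed to move an older copy forward.
    record MetaDiff(long newVersion, boolean newSystem) {
        Meta apply(Meta previous) {
            return new Meta(previous.name(), newVersion, newSystem);
        }
    }

    public static void main(String[] args) {
        Meta previous = new Meta("test-index", 1, false);
        Meta next = new Meta("test-index", 2, true);
        MetaDiff diff = new MetaDiff(next.version(), next.system());
        Meta applied = diff.apply(previous);
        // Mirrors the assertions in the test: the system flag flips and the version advances.
        System.out.println(applied.system() && applied.version() == 2); // true
    }
}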
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java
index 3d11193a07884..81055e01d915b 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java
@@ -19,32 +19,46 @@
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.ShardRoutingState;
+import org.opensearch.common.ValidationException;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.env.Environment;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
 import org.opensearch.indices.ShardLimitValidator;
 import org.opensearch.indices.cluster.ClusterStateChanges;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.junit.After;
 import org.junit.Before;
 
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
 
 public class SearchOnlyReplicaTests extends OpenSearchSingleNodeTestCase {
 
+    public static final String TEST_RS_REPO = "test-rs-repo";
+    public static final String INDEX_NAME = "test-index";
     private ThreadPool threadPool;
 
     @Before
@@ -70,7 +84,7 @@ protected Settings featureFlagSettings() {
     public void testCreateWithDefaultSearchReplicasSetting() {
         final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
         ClusterState state = createIndexWithSettings(cluster, Settings.builder().build());
-        IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index(INDEX_NAME).getShards().get(0);
         assertEquals(1, indexShardRoutingTable.replicaShards().size());
         assertEquals(0, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(1, indexShardRoutingTable.writerReplicas().size());
@@ -91,53 +105,50 @@ public void testSearchReplicasValidationWithDocumentReplication() {
             )
         );
         assertEquals(
-            "To set index.number_of_search_only_replicas, index.replication.type must be set to SEGMENT",
+            "To set index.number_of_search_only_replicas, index.remote_store.enabled must be set to true",
             exception.getCause().getMessage()
         );
     }
 
-    public void testUpdateSearchReplicaCount() {
-        final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
+    public void testUpdateSearchReplicaCount() throws ExecutionException, InterruptedException {
+        Settings settings = Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, 1)
+            .put(SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+            .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
+            .build();
+        createIndex(INDEX_NAME, settings);
 
-        ClusterState state = createIndexWithSettings(
-            cluster,
-            Settings.builder()
-                .put(SETTING_NUMBER_OF_SHARDS, 1)
-                .put(SETTING_NUMBER_OF_REPLICAS, 0)
-                .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
-                .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-                .build()
-        );
-        assertTrue(state.metadata().hasIndex("index"));
-        rerouteUntilActive(state, cluster);
-        IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        IndexShardRoutingTable indexShardRoutingTable = getIndexShardRoutingTable();
         assertEquals(1, indexShardRoutingTable.replicaShards().size());
         assertEquals(1, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(0, indexShardRoutingTable.writerReplicas().size());
 
         // add another replica
-        state = cluster.updateSettings(
-            state,
-            new UpdateSettingsRequest("index").settings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build())
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings(
+            Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build()
         );
-        rerouteUntilActive(state, cluster);
-        indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        client().admin().indices().updateSettings(updateSettingsRequest).get();
+        indexShardRoutingTable = getIndexShardRoutingTable();
         assertEquals(2, indexShardRoutingTable.replicaShards().size());
         assertEquals(2, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(0, indexShardRoutingTable.writerReplicas().size());
 
         // remove all replicas
-        state = cluster.updateSettings(
-            state,
-            new UpdateSettingsRequest("index").settings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0).build())
+        updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings(
+            Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0).build()
        );
-        rerouteUntilActive(state, cluster);
-        indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        client().admin().indices().updateSettings(updateSettingsRequest).get();
+        indexShardRoutingTable = getIndexShardRoutingTable();
         assertEquals(0, indexShardRoutingTable.replicaShards().size());
         assertEquals(0, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(0, indexShardRoutingTable.writerReplicas().size());
     }
 
+    private IndexShardRoutingTable getIndexShardRoutingTable() {
+        return client().admin().cluster().prepareState().get().getState().getRoutingTable().index(INDEX_NAME).getShards().get(0);
+    }
+
     private ClusterState createIndexWithSettings(ClusterStateChanges cluster, Settings settings) {
         List<DiscoveryNode> allNodes = new ArrayList<>();
         // node for primary/local
@@ -149,48 +160,32 @@ private ClusterState createIndexWithSettings(ClusterStateChanges cluster, Settin
         }
 
         ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
-        CreateIndexRequest request = new CreateIndexRequest("index", settings).waitForActiveShards(ActiveShardCount.NONE);
+        CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME, settings).waitForActiveShards(ActiveShardCount.NONE);
         state = cluster.createIndex(state, request);
         return state;
     }
 
     public void testUpdateSearchReplicasOverShardLimit() {
-        final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
-
-        List<DiscoveryNode> allNodes = new ArrayList<>();
-        // node for primary/local
-        DiscoveryNode localNode = createNode(Version.CURRENT, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE);
-        allNodes.add(localNode);
-
-        allNodes.add(createNode(Version.CURRENT, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE));
-
-        ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
+        Settings settings = Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, 1)
+            .put(SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+            .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0)
+            .build();
+        createIndex(INDEX_NAME, settings);
+
+        Integer maxShardPerNode = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getDefault(Settings.EMPTY);
 
-        CreateIndexRequest request = new CreateIndexRequest(
-            "index",
-            Settings.builder()
-                .put(SETTING_NUMBER_OF_SHARDS, 1)
-                .put(SETTING_NUMBER_OF_REPLICAS, 0)
-                .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
-                .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-                .build()
-        ).waitForActiveShards(ActiveShardCount.NONE);
-        state = cluster.createIndex(state, request);
-        assertTrue(state.metadata().hasIndex("index"));
-        rerouteUntilActive(state, cluster);
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings(
+            Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build()
+        );
 
         // add another replica
-        ClusterState finalState = state;
-        Integer maxShardPerNode = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getDefault(Settings.EMPTY);
-        expectThrows(
-            RuntimeException.class,
-            () -> cluster.updateSettings(
-                finalState,
-                new UpdateSettingsRequest("index").settings(
-                    Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build()
-                )
-            )
+        ExecutionException executionException = expectThrows(
+            ExecutionException.class,
+            () -> client().admin().indices().updateSettings(updateSettingsRequest).get()
        );
+        Throwable cause = executionException.getCause();
+        assertEquals(ValidationException.class, cause.getClass());
     }
 
     public void testUpdateSearchReplicasOnDocrepCluster() {
@@ -206,7 +201,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() {
 
         ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
         CreateIndexRequest request = new CreateIndexRequest(
-            "index",
+            INDEX_NAME,
             Settings.builder()
                 .put(SETTING_NUMBER_OF_SHARDS, 1)
                 .put(SETTING_NUMBER_OF_REPLICAS, 0)
@@ -214,7 +209,7 @@
                 .build()
         ).waitForActiveShards(ActiveShardCount.NONE);
         state = cluster.createIndex(state, request);
-        assertTrue(state.metadata().hasIndex("index"));
+        assertTrue(state.metadata().hasIndex(INDEX_NAME));
         rerouteUntilActive(state, cluster);
 
         // add another replica
@@ -224,7 +219,7 @@
             RuntimeException.class,
             () -> cluster.updateSettings(
                 finalState,
-                new UpdateSettingsRequest("index").settings(
+                new UpdateSettingsRequest(INDEX_NAME).settings(
                     Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build()
                 )
             )
@@ -232,11 +227,51 @@
     }
 
+    Path tempDir = createTempDir();
+    Path repo = tempDir.resolve("repo");
+
+    @Override
+    protected Settings nodeSettings() {
+        return Settings.builder()
+            .put(super.nodeSettings())
+            .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+            .put(buildRemoteStoreNodeAttributes(TEST_RS_REPO, repo))
+            .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
+            .put(Environment.PATH_REPO_SETTING.getKey(), repo)
+            .build();
+    }
+
+    private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) {
+        String repoTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            repoName
+        );
+        String repoSettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            repoName
+        );
+
+        return Settings.builder()
+            .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false)
+            .build();
+    }
+
     private static void rerouteUntilActive(ClusterState state, ClusterStateChanges cluster) {
-        while (state.routingTable().index("index").shard(0).allShardsStarted() == false) {
+        while (state.routingTable().index(INDEX_NAME).shard(0).allShardsStarted() == false) {
             state = cluster.applyStartedShards(
                 state,
-                state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
+                state.routingTable().index(INDEX_NAME).shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
             );
             state = cluster.reroute(state, new ClusterRerouteRequest());
         }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
index aaeeb52ab5709..4263e1aa347dc 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
@@ -604,7 +604,8 @@ public void testAdaptiveReplicaSelection() throws Exception {
             null,
             null,
             collector,
-            outstandingRequests
+            outstandingRequests,
+            null
         );
 
         assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards));
@@ -616,7 +617,7 @@
         searchedShards.add(firstChoice);
         selectedNodes.add(firstChoice.currentNodeId());
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         assertThat(groupIterator.size(), equalTo(numIndices * numShards));
         ShardRouting secondChoice = groupIterator.get(0).nextOrNull();
@@ -624,7 +625,7 @@
         searchedShards.add(secondChoice);
         selectedNodes.add(secondChoice.currentNodeId());
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         assertThat(groupIterator.size(), equalTo(numIndices * numShards));
         ShardRouting thirdChoice = groupIterator.get(0).nextOrNull();
@@ -643,26 +644,26 @@
         outstandingRequests.put("node_1", 1L);
         outstandingRequests.put("node_2", 1L);
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         ShardRouting shardChoice = groupIterator.get(0).nextOrNull();
         // node 1 should be the lowest ranked node to start
         assertThat(shardChoice.currentNodeId(), equalTo("node_1"));
 
         // node 1 starts getting more loaded...
         collector.addNodeStatistics("node_1", 2, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         shardChoice = groupIterator.get(0).nextOrNull();
         assertThat(shardChoice.currentNodeId(), equalTo("node_1"));
 
         // and more loaded...
         collector.addNodeStatistics("node_1", 3, TimeValue.timeValueMillis(250).nanos(), TimeValue.timeValueMillis(200).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         shardChoice = groupIterator.get(0).nextOrNull();
         assertThat(shardChoice.currentNodeId(), equalTo("node_1"));
 
         // and even more
         collector.addNodeStatistics("node_1", 4, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         shardChoice = groupIterator.get(0).nextOrNull();
         // finally, node 2 is chosen instead
         assertThat(shardChoice.currentNodeId(), equalTo("node_2"));
@@ -709,7 +710,8 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except
             null,
             null,
             collector,
-            outstandingRequests
+            outstandingRequests,
+            null
         );
 
         assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards));
@@ -722,7 +724,7 @@
         searchedShards.add(firstChoice);
         selectedNodes.add(firstChoice.currentNodeId());
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         assertThat(groupIterator.size(), equalTo(numIndices * numShards));
         assertThat(groupIterator.get(0).size(), equalTo(numReplicas + 1));
@@ -745,18 +747,18 @@
         outstandingRequests.put("node_a1", 1L);
         outstandingRequests.put("node_b2", 1L);
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         // node_a0 or node_a1 should be the lowest ranked node to start
         groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_a")));
 
         // Adding more load to node_a0
         collector.addNodeStatistics("node_a0", 10, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         // Adding more load to node_a0 and node_a1 from zone-a
         collector.addNodeStatistics("node_a1", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
         collector.addNodeStatistics("node_a0", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         // ARS should pick node_b2 from zone-b since both node_a0 and node_a1 are overloaded
         groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_b")));
@@ -842,8 +844,8 @@ public void testWeightedOperationRouting() throws Exception {
             null,
             null,
             collector,
-            outstandingRequests
-
+            outstandingRequests,
+            null
         );
 
         for (ShardIterator it : groupIterator) {
@@ -871,7 +873,7 @@
         opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
 
         // search shards call
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         for (ShardIterator it : groupIterator) {
             List<ShardRouting> shardRoutings = Collections.singletonList(it.nextOrNull());
@@ -935,8 +937,8 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             null,
             null,
             collector,
-            outstandingRequests
-
+            outstandingRequests,
+            null
         );
 
         for (ShardIterator it : groupIterator) {
@@ -969,7 +971,7 @@
         opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
 
         // search shards call
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         for (ShardIterator it : groupIterator) {
             while (it.remaining() > 0) {
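The adaptive-replica-selection sequence above encodes the intended behavior: with comparable statistics any replica is eligible, and as one node's queue and latencies grow its rank worsens until another node's replica is preferred. The toy scoring rule below makes that concrete; it is not OpenSearch's actual ARS rank formula (the production ranking lives in the ResponseCollectorService that the tests feed via addNodeStatistics), only an illustration using the same inputs: queue size, response time, service time, and outstanding requests.

import java.util.HashMap;
import java.util.Map;

// Toy illustration of adaptive replica selection; the real rank formula differs.
public class ArsSketch {

    // stats value layout: { queueSize, responseTimeNanos, serviceTimeNanos }
    static String pickNode(Map<String, long[]> stats, Map<String, Long> outstanding) {
        String best = null;
        double bestScore = Double.MAX_VALUE;
        for (Map.Entry<String, long[]> e : stats.entrySet()) {
            long queue = e.getValue()[0];
            long responseNanos = e.getValue()[1];
            long serviceNanos = e.getValue()[2];
            long pending = outstanding.getOrDefault(e.getKey(), 0L);
            // Lower is better: heavily loaded or slow nodes rank worse.
            double score = (1 + pending + queue) * (responseNanos + serviceNanos);
            if (score < bestScore) {
                bestScore = score;
                best = e.getKey();
            }
        }
        return best;
    }

    public static void main(String[] args) {
        Map<String, long[]> stats = new HashMap<>();
        stats.put("node_1", new long[] { 1, 100_000_000L, 50_000_000L });
        stats.put("node_2", new long[] { 1, 200_000_000L, 150_000_000L });
        Map<String, Long> outstanding = Map.of("node_1", 1L, "node_2", 1L);
        System.out.println(pickNode(stats, outstanding)); // node_1

        // node_1 degrades, as in the test sequence above; node_2 takes over.
        stats.put("node_1", new long[] { 4, 300_000_000L, 250_000_000L });
        System.out.println(pickNode(stats, outstanding)); // node_2
    }
}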
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java
index 6a03a1f79bcde..a7f18aabf8436 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java
@@ -194,7 +194,7 @@ public AllocationService createRemoteCapableAllocationService() {
     }
 
     public AllocationService createRemoteCapableAllocationService(String excludeNodes) {
-        Settings settings = Settings.builder().put("cluster.routing.allocation.exclude.node_id", excludeNodes).build();
+        Settings settings = Settings.builder().put("cluster.routing.allocation.exclude._id", excludeNodes).build();
         return new MockAllocationService(
             randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
             new TestGatewayAllocator(),
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java
index e1c0a7eff1f6e..e55a9de160114 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java
@@ -25,25 +25,51 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe
      * Post rebalance primaries should be balanced across all the nodes.
      */
     public void testShardAllocationAndRebalance() {
-        int localOnlyNodes = 20;
-        int remoteCapableNodes = 40;
-        int localIndices = 40;
-        int remoteIndices = 80;
+        final int localOnlyNodes = 20;
+        final int remoteCapableNodes = 40;
+        final int halfRemoteCapableNodes = remoteCapableNodes / 2;
+        final int localIndices = 40;
+        final int remoteIndices = 80;
         ClusterState clusterState = createInitialCluster(localOnlyNodes, remoteCapableNodes, localIndices, remoteIndices);
-        AllocationService service = this.createRemoteCapableAllocationService();
+        final StringBuilder excludeNodes = new StringBuilder();
+        for (int i = 0; i < halfRemoteCapableNodes; i++) {
+            excludeNodes.append(getNodeId(i, true));
+            if (i != (remoteCapableNodes / 2 - 1)) {
+                excludeNodes.append(", ");
+            }
+        }
+        AllocationService service = this.createRemoteCapableAllocationService(excludeNodes.toString());
         clusterState = allocateShardsAndBalance(clusterState, service);
         RoutingNodes routingNodes = clusterState.getRoutingNodes();
         RoutingAllocation allocation = getRoutingAllocation(clusterState, routingNodes);
-        final Map<String, Integer> nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true);
-        final Map<String, Integer> nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false);
+        Map<String, Integer> nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true);
+        Map<String, Integer> nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false);
         int avgPrimariesPerNode = getTotalShardCountAcrossNodes(nodePrimariesCounter) / remoteCapableNodes;
 
-        // Primary and replica are balanced post first reroute
+        // Primary and replica are balanced after first allocating unassigned
+        for (RoutingNode node : routingNodes) {
+            if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getNodePool(node))) {
+                if (Integer.parseInt(node.nodeId().split("-")[4]) < halfRemoteCapableNodes) {
+                    assertEquals(0, (int) nodePrimariesCounter.getOrDefault(node.nodeId(), 0));
+                } else {
+                    assertEquals(avgPrimariesPerNode * 2, (int) nodePrimariesCounter.get(node.nodeId()));
+                }
+                assertTrue(nodeReplicaCounter.getOrDefault(node.nodeId(), 0) >= 0);
+            }
+        }
+
+        // Remove exclude constraint and rebalance
+        service = this.createRemoteCapableAllocationService();
+        clusterState = allocateShardsAndBalance(clusterState, service);
+        routingNodes = clusterState.getRoutingNodes();
+        allocation = getRoutingAllocation(clusterState, routingNodes);
+        nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true);
+        nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false);
         for (RoutingNode node : routingNodes) {
             if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getNodePool(node))) {
-                assertInRange(nodePrimariesCounter.get(node.nodeId()), avgPrimariesPerNode, remoteCapableNodes - 1);
-                assertTrue(nodeReplicaCounter.get(node.nodeId()) >= 0);
+                assertEquals(avgPrimariesPerNode, (int) nodePrimariesCounter.get(node.nodeId()));
+                assertTrue(nodeReplicaCounter.getOrDefault(node.nodeId(), 0) >= 0);
             }
         }
     }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java
index 8d4f4cdee26cc..9604e82fe4c88 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java
@@ -8,27 +8,44 @@
 
 package org.opensearch.cluster.routing.allocation.decider;
 
+import org.opensearch.Version;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.EmptyClusterInfoService;
 import org.opensearch.cluster.OpenSearchAllocationTestCase;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.routing.RecoverySource;
 import org.opensearch.cluster.routing.RoutingTable;
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.ShardRoutingHelper;
 import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.opensearch.cluster.routing.allocation.command.AllocationCommands;
+import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.snapshots.EmptySnapshotsInfoService;
 import org.opensearch.test.gateway.TestGatewayAllocator;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
+import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
+import static org.opensearch.cluster.routing.ShardRoutingState.STARTED;
 import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
 
 public class SearchReplicaAllocationDeciderTests extends OpenSearchAllocationTestCase {
 
@@ -130,4 +147,171 @@ public void testSearchReplicaRoutingDedicatedIncludes() {
         decision = (Decision.Single) filterAllocationDecider.canRemain(primary, state.getRoutingNodes().node("node1"), allocation);
         assertEquals(decision.toString(), Decision.Type.YES, decision.type());
     }
 
+    public void testSearchReplicaWithThrottlingDecider_PrimaryBasedReplication() {
+        TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
+        // throttle outgoing on primary
+        AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator);
+
+        Set<Setting<?>> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING);
+        Metadata metadata = Metadata.builder()
+            .put(
+                IndexMetadata.builder("test")
+                    .settings(settings(Version.CURRENT))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+                    .numberOfSearchReplicas(1)
+            )
+            .build();
+
+        ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newNode("node1"), metadata, gatewayAllocator);
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size());
+        assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0);
+        // start a third node, we will try and move the SR to this node
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        // remove the primary and reroute - this would throw an NPE for search replicas but *not* regular.
+        // regular replicas would get promoted to primary before the CanMoveAway call.
+        clusterState = strategy.disassociateDeadNodes(
+            ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(),
+            true,
+            "test"
+        );
+
+        // attempt to move the replica
+        AllocationService.CommandsResult commandsResult = strategy.reroute(
+            clusterState,
+            new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")),
+            true,
+            false
+        );
+
+        assertEquals(commandsResult.explanations().explanations().size(), 1);
+        assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO);
+        boolean isCorrectNoDecision = false;
+        for (Decision decision : commandsResult.explanations().explanations().get(0).decisions().getDecisions()) {
+            if (decision.label().equals(ThrottlingAllocationDecider.NAME)) {
+                assertEquals("primary shard for this replica is not yet active", decision.getExplanation());
+                assertEquals(Decision.Type.NO, decision.type());
+                isCorrectNoDecision = true;
+            }
+        }
+        assertTrue(isCorrectNoDecision);
+    }
+
+    public void testSearchReplicaWithThrottlingDeciderWithoutPrimary_RemoteStoreEnabled() {
+        TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
+        AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator);
+        Set<Setting<?>> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING);
+        Metadata metadata = Metadata.builder()
+            .put(
+                IndexMetadata.builder("test")
+                    .settings(settings(Version.CURRENT))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+                    .numberOfSearchReplicas(1)
+            )
+            .build();
+
+        ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newRemoteNode("node1"), metadata, gatewayAllocator);
+
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        DiscoveryNode node2 = newRemoteNode("node2");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node2)).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size());
+        assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0);
+        // start a third node, we will try and move the SR to this node
+        DiscoveryNode node3 = newRemoteNode("node3");
+        clusterState =
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node3)).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + // remove the primary and reroute - this would throw an NPE for search replicas but *not* regular. + // regular replicas would get promoted to primary before the CanMoveAway call. + clusterState = strategy.disassociateDeadNodes( + ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(), + true, + "test" + ); + + // attempt to move the replica + AllocationService.CommandsResult commandsResult = strategy.reroute( + clusterState, + new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), + true, + false + ); + + assertEquals(commandsResult.explanations().explanations().size(), 1); + assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO); + boolean foundYesMessage = false; + for (Decision decision : commandsResult.explanations().explanations().get(0).decisions().getDecisions()) { + if (decision.label().equals(ThrottlingAllocationDecider.NAME)) { + assertEquals("Remote based search replica below incoming recovery limit: [0 < 2]", decision.getExplanation()); + assertEquals(Decision.Type.YES, decision.type()); + foundYesMessage = true; + } + } + assertTrue(foundYesMessage); + } + + private ClusterState initializeClusterStateWithSingleIndexAndShard( + DiscoveryNode primaryNode, + Metadata metadata, + TestGatewayAllocator gatewayAllocator + ) { + Metadata.Builder metadataBuilder = new Metadata.Builder(metadata); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + IndexMetadata indexMetadata = metadata.index("test"); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata); + initializePrimaryAndMarkInSync(indexMetadata.getIndex(), indexMetadataBuilder, gatewayAllocator, primaryNode); + routingTableBuilder.addAsRecovery(indexMetadata); + metadataBuilder.put(indexMetadata, false); + RoutingTable routingTable = routingTableBuilder.build(); + return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .nodes(DiscoveryNodes.builder().add(primaryNode)) + .metadata(metadataBuilder.build()) + .routingTable(routingTable) + .build(); + } + + private void initializePrimaryAndMarkInSync( + Index index, + IndexMetadata.Builder indexMetadata, + TestGatewayAllocator gatewayAllocator, + DiscoveryNode primaryNode + ) { + final ShardRouting unassigned = ShardRouting.newUnassigned( + new ShardId(index, 0), + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + ShardRouting started = ShardRoutingHelper.moveToStarted(ShardRoutingHelper.initialize(unassigned, primaryNode.getId())); + indexMetadata.putInSyncAllocationIds(0, Collections.singleton(started.allocationId().getId())); + gatewayAllocator.addKnownAllocation(started); + } + + private static DiscoveryNode newRemoteNode(String name) { + return newNode( + name, + name, + Map.of( + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + "cluster-repo", + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + "segment-repo", + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + "translog-repo" + ) + ); + } } diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index 98a79f3ca38dc..cb691f2177f6d 100644 --- 
a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -260,4 +260,21 @@ public void testRoundYear() { long startOf1996 = Year.of(1996).atDay(1).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli(); assertThat(DateUtils.roundYear(endOf1996), is(startOf1996)); } + + public void testClampToMillisRange() { + Instant normalInstant = Instant.now(); + assertEquals(normalInstant, DateUtils.clampToMillisRange(normalInstant)); + + Instant beforeMinInstant = DateUtils.INSTANT_LONG_MIN_VALUE.minusMillis(1); + assertEquals(DateUtils.INSTANT_LONG_MIN_VALUE, DateUtils.clampToMillisRange(beforeMinInstant)); + + Instant afterMaxInstant = DateUtils.INSTANT_LONG_MAX_VALUE.plusMillis(1); + assertEquals(DateUtils.INSTANT_LONG_MAX_VALUE, DateUtils.clampToMillisRange(afterMaxInstant)); + + assertEquals(DateUtils.INSTANT_LONG_MIN_VALUE, DateUtils.clampToMillisRange(DateUtils.INSTANT_LONG_MIN_VALUE)); + + assertEquals(DateUtils.INSTANT_LONG_MAX_VALUE, DateUtils.clampToMillisRange(DateUtils.INSTANT_LONG_MAX_VALUE)); + + assertThrows(NullPointerException.class, () -> DateUtils.clampToMillisRange(null)); + } } diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index dd2fb51151a5b..d85ed10eeeae7 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; @@ -489,4 +490,146 @@ public void testHideStateIfNotRecovered() { assertFalse(hiddenState.blocks().hasIndexBlock(indexMetadata.getIndex().getName(), IndexMetadata.INDEX_READ_ONLY_BLOCK)); } + public void testRemoteRestoreWithSearchOnlyShards() { + final int numOfShards = 10; + final int numAssignedSearchReplicas = 5; + final int numOfSearchReplicas = 1; + + final IndexMetadata remoteMetadata = createIndexMetadata( + "test-remote", + Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numOfSearchReplicas) + .build() + ); + // create an initial routing table where all search replicas exist and are assigned, they should get included as is in the restored + // routing. 
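+ // shards [0, numAssignedSearchReplicas) get their search replica assigned to a node; the rest stay unassigned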
+ final Index index = remoteMetadata.getIndex(); + + Map routingTable = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + final String nodeId = "node " + shardNumber; + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder( + new ShardId(remoteMetadata.getIndex(), shardId.id()) + ); + // add a search replica for the shard + ShardRouting searchReplicaRouting = ShardRouting.newUnassigned( + shardId, + false, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + if (shardNumber < numAssignedSearchReplicas) { + // first five shards add the SR as assigned + builder.addShard(searchReplicaRouting.initialize(nodeId, null, 0L)); + } else { + builder.addShard(searchReplicaRouting); + } + routingTable.put(shardId, builder.build()); + } + IndexRoutingTable.Builder routingTableAfterRestore = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + routingTable, + true + ); + for (IndexShardRoutingTable indexShardRoutingTable : routingTableAfterRestore.build()) { + assertEquals(numOfSearchReplicas, indexShardRoutingTable.searchOnlyReplicas().size()); + for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) { + if (shardRouting.shardId().getId() < numAssignedSearchReplicas) { + assertTrue(shardRouting.assignedToNode()); + assertTrue(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } else { + assertTrue(shardRouting.unassigned()); + assertFalse(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } + } + } + } + + private boolean containsSameRouting(IndexShardRoutingTable oldRoutingTable, ShardRouting shardRouting) { + return oldRoutingTable.searchOnlyReplicas().stream().anyMatch(r -> r.isSameAllocation(shardRouting)); + } + + public void testRemoteRestoreWithActivePrimaryAndSearchOnlyShards() { + final int numOfShards = 10; + final int numAssignedSearchReplicas = 5; + final int numOfSearchReplicas = 1; + + final IndexMetadata remoteMetadata = createIndexMetadata( + "test-remote", + Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numOfSearchReplicas) + .build() + ); + // create an initial routing table where all search replicas exist and are assigned, they should get included as is in the restored + // routing. 
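+ // unlike the restore above, every shard here also starts with an active assigned primary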
+ final Index index = remoteMetadata.getIndex(); + + Map routingTable = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + final String nodeId = "node " + shardNumber; + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder( + new ShardId(remoteMetadata.getIndex(), shardId.id()) + ); + // add the primary as assigned + ShardRouting primary = ShardRouting.newUnassigned( + shardId, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + builder.addShard(primary.initialize(nodeId + " Primary", null, 0L)); + + // add a search replica for the shard + ShardRouting searchReplicaRouting = ShardRouting.newUnassigned( + shardId, + false, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + if (shardNumber < numAssignedSearchReplicas) { + // first five shards add the SR as assigned + builder.addShard(searchReplicaRouting.initialize(nodeId, null, 0L)); + } else { + builder.addShard(searchReplicaRouting); + } + routingTable.put(shardId, builder.build()); + } + IndexRoutingTable.Builder routingTableAfterRestore = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + routingTable, + false + ); + for (IndexShardRoutingTable indexShardRoutingTable : routingTableAfterRestore.build()) { + assertEquals(numOfSearchReplicas, indexShardRoutingTable.searchOnlyReplicas().size()); + for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) { + if (shardRouting.shardId().getId() < numAssignedSearchReplicas) { + assertTrue(shardRouting.assignedToNode()); + assertTrue(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } else { + assertTrue(shardRouting.unassigned()); + assertFalse(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } + } + } + } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index be07aa0d05e9f..e3684178a18ea 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -2354,6 +2354,14 @@ public void testReadLatestClusterStateFromCache() throws IOException { .getState(clusterState.getClusterName().value(), expectedManifest); assertEquals(stateFromCache.getMetadata(), state.getMetadata()); + ClusterState stateFromCache2 = remoteClusterStateService.getClusterStateForManifest( + clusterState.getClusterName().value(), + expectedManifest, + "nodeA", + true + ); + assertEquals(stateFromCache2.getMetadata(), state.getMetadata()); + final ClusterMetadataManifest notExistMetadata = ClusterMetadataManifest.builder() .indices(List.of()) .clusterTerm(1L) diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index c34f13041cb11..cd6beffa6e195 100644 --- 
a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -59,6 +59,7 @@ import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transport; import org.junit.After; import org.junit.Before; @@ -70,8 +71,6 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.http.AbstractHttpServerTransport.resolvePublishPort; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class AbstractHttpServerTransportTests extends OpenSearchTestCase { @@ -101,47 +100,40 @@ public void testHttpPublishPort() throws Exception { int boundPort = randomIntBetween(9000, 9100); int otherBoundPort = randomIntBetween(9200, 9300); - int publishPort = resolvePublishPort( - Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(), - randomAddresses(), - getByName("127.0.0.2") - ); + int publishPort = Transport.resolveTransportPublishPort(9080, randomAddresses(), getByName("127.0.0.2")); assertThat("Publish port should be explicitly set to 9080", publishPort, equalTo(9080)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort)); - final BindHttpException e = expectThrows( - BindHttpException.class, - () -> resolvePublishPort( - Settings.EMPTY, - asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), - getByName("127.0.0.3") - ) + publishPort = Transport.resolveTransportPublishPort( + -1, + asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), + getByName("127.0.0.3") ); - assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port")); + assertThat(publishPort, equalTo(-1)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort)); if (NetworkUtils.SUPPORTS_V6) { - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") ); @@ -293,6 +285,8 @@ public HttpStats stats() { + opaqueId + "\\]\\[" + (badRequest ? "BAD_REQUEST" : "OK") + + "\\]\\[" + + (badRequest ? 
"400" : "200") + "\\]\\[null\\]\\[0\\] sent response to \\[.*" ) ); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 2aa310ae959d9..6d095bf3b61b7 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -156,7 +156,6 @@ public void testIgnoreMalformedLegacy() throws IOException { "failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); - testIgnoreMalformedForValue("-522000000", "long overflow"); } public void testIgnoreMalformed() throws IOException { @@ -170,7 +169,6 @@ public void testIgnoreMalformed() throws IOException { "failed to parse date field [2016-03-99] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); - testIgnoreMalformedForValue("-522000000", "long overflow"); } private void testIgnoreMalformedForValue(String value, String expectedCause) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index fe5994011f1b9..297c0e3e356dd 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -31,20 +31,32 @@ package org.opensearch.index.mapper; +import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -73,7 +85,10 @@ import java.io.IOException; import java.time.Instant; import java.time.ZoneOffset; +import java.util.Arrays; import java.util.Collections; +import java.util.List; +import java.util.Locale; import static org.hamcrest.CoreMatchers.is; import static org.apache.lucene.document.LongPoint.pack; @@ -495,4 +510,187 @@ public void testParseSourceValueNanos() throws IOException { MappedFieldType 
nullValueMapper = fieldType(Resolution.NANOSECONDS, "strict_date_time||epoch_millis", nullValueDate); assertEquals(Collections.singletonList(nullValueDate), fetchSourceValue(nullValueMapper, null)); } + + public void testDateResolutionForOverflow() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + + DateFieldType ft = new DateFieldType( + "test_date", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||strict_date_optional_time"), + Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + + List dates = Arrays.asList( + null, + "2020-01-01T00:00:00Z", + null, + "2021-01-01T00:00:00Z", + "+292278994-08-17T07:12:55.807Z", + null, + "-292275055-05-16T16:47:04.192Z" + ); + + int numNullDates = 0; + long minDateValue = Long.MAX_VALUE; + long maxDateValue = Long.MIN_VALUE; + + for (int i = 0; i < dates.size(); i++) { + ParseContext.Document doc = new ParseContext.Document(); + String dateStr = dates.get(i); + + if (dateStr != null) { + long timestamp = Resolution.MILLISECONDS.convert(DateFormatters.from(ft.dateTimeFormatter().parse(dateStr)).toInstant()); + doc.add(new LongPoint(ft.name(), timestamp)); + doc.add(new SortedNumericDocValuesField(ft.name(), timestamp)); + doc.add(new StoredField(ft.name(), timestamp)); + doc.add(new StoredField("id", i)); + minDateValue = Math.min(minDateValue, timestamp); + maxDateValue = Math.max(maxDateValue, timestamp); + } else { + numNullDates++; + doc.add(new StoredField("id", i)); + } + w.addDocument(doc); + } + + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = new IndexSearcher(reader); + + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + QueryShardContext context = new QueryShardContext( + 0, + new IndexSettings(IndexMetadata.builder("foo").settings(indexSettings).build(), indexSettings), + BigArrays.NON_RECYCLING_INSTANCE, + null, + null, + null, + null, + null, + xContentRegistry(), + writableRegistry(), + null, + null, + () -> nowInMillis, + null, + null, + () -> true, + null + ); + + Query rangeQuery = ft.rangeQuery( + "-292275055-05-16T16:47:04.192Z", + "+292278994-08-17T07:12:55.807Z", + true, + true, + null, + null, + null, + context + ); + + TopDocs topDocs = searcher.search(rangeQuery, dates.size()); + assertEquals("Number of non-null date documents", dates.size() - numNullDates, topDocs.totalHits.value); + + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + org.apache.lucene.document.Document doc = reader.document(scoreDoc.doc); + IndexableField dateField = doc.getField(ft.name()); + if (dateField != null) { + long dateValue = dateField.numericValue().longValue(); + assertTrue( + "Date value " + dateValue + " should be within valid range", + dateValue >= minDateValue && dateValue <= maxDateValue + ); + } + } + + DateFieldType ftWithNullValue = new DateFieldType( + "test_date", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||strict_date_optional_time"), + Resolution.MILLISECONDS, + "2020-01-01T00:00:00Z", + Collections.emptyMap() + ); + + Query nullValueQuery = ftWithNullValue.termQuery("2020-01-01T00:00:00Z", context); + topDocs = searcher.search(nullValueQuery, dates.size()); + assertEquals("Documents matching the 2020-01-01 date", 1, topDocs.totalHits.value); + + 
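// release the Lucene reader, writer and directory +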
IOUtils.close(reader, w, dir); + } + + public void testDateFieldTypeWithNulls() throws IOException { + DateFieldType ft = new DateFieldType( + "domainAttributes.dueDate", + true, + true, + true, + DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis||date_optional_time"), + Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + + int nullDocs = 3500; + int datedDocs = 50; + + for (int i = 0; i < nullDocs; i++) { + ParseContext.Document doc = new ParseContext.Document(); + doc.add(new StringField("domainAttributes.firmId", "12345678910111213", Field.Store.YES)); + w.addDocument(doc); + } + + for (int i = 1; i <= datedDocs; i++) { + ParseContext.Document doc = new ParseContext.Document(); + String dateStr = String.format(Locale.ROOT, "2022-03-%02dT15:40:58.324", (i % 30) + 1); + long timestamp = Resolution.MILLISECONDS.convert(DateFormatters.from(ft.dateTimeFormatter().parse(dateStr)).toInstant()); + doc.add(new StringField("domainAttributes.firmId", "12345678910111213", Field.Store.YES)); + doc.add(new LongPoint(ft.name(), timestamp)); + doc.add(new SortedNumericDocValuesField(ft.name(), timestamp)); + doc.add(new StoredField(ft.name(), timestamp)); + w.addDocument(doc); + } + + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = new IndexSearcher(reader); + + BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); + queryBuilder.add(new TermQuery(new Term("domainAttributes.firmId", "12345678910111213")), BooleanClause.Occur.MUST); + + Sort sort = new Sort(new SortField(ft.name(), SortField.Type.DOC, false)); + + for (int i = 0; i < 100; i++) { + TopDocs topDocs = searcher.search(queryBuilder.build(), nullDocs + datedDocs, sort); + assertEquals("Total hits should match total documents", nullDocs + datedDocs, topDocs.totalHits.value); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + org.apache.lucene.document.Document doc = reader.document(scoreDoc.doc); + IndexableField dateField = doc.getField(ft.name()); + if (dateField != null) { + long dateValue = dateField.numericValue().longValue(); + Instant dateInstant = Instant.ofEpochMilli(dateValue); + assertTrue( + "Date should be in March 2022", + dateInstant.isAfter(Instant.parse("2022-03-01T00:00:00Z")) + && dateInstant.isBefore(Instant.parse("2022-04-01T00:00:00Z")) + ); + } + } + } + IOUtils.close(reader, w, dir); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java index 38a6f13777f00..4160108342534 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -9,6 +9,7 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.FieldExistsQuery; @@ -24,6 +25,7 @@ import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; @@ -138,39 +140,273 @@ public 
void testRewriteValue() { assertEquals("field.bar=foo", searchValuesDocPath); } - public void testTermQuery() { + public void testTermQueryCaseInsensitive() { - FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); + assertEquals( + AutomatonQueries.caseInsensitiveTermQuery(new Term("field._valueAndPath", "field.bar=fOo")), + dynamicMappedFieldType.termQueryCaseInsensitive("fOo", null) + ); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + assertEquals( + AutomatonQueries.caseInsensitiveTermQuery(new Term("field._value", "fOo")), + ft.termQueryCaseInsensitive("fOo", null) + ); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("fOo"))); + + assertEquals(expected, ft.termQuery("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = AutomatonQueries.createAutomatonQuery( + new Term("field" + VALUE_SUFFIX, "field.fOo"), + AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = AutomatonQueries.createAutomatonQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "field.fOo"), + AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + + assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + 
FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( "field", null, - true, - true + false, + false, + null, + Collections.emptyMap() + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.", + e.getMessage() + ); + } - // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field - String searchFieldName = (flatParentFieldType).directSubfield(); - String searchValues = (flatParentFieldType).rewriteValue("foo"); - assertEquals("foo", searchValues); - assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + public void testTermQuery() { - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( - "field.bar", - flatParentFieldType.name(), - flatParentFieldType.getValueFieldType(), - flatParentFieldType.getValueAndPathFieldType() - ); + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true); - // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field - String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); - String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); - assertEquals("field.bar=foo", searchValuesDocPath); - assertEquals(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), dynamicMappedFieldType.termQuery("foo", null)); + // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field + String searchFieldName = (flatParentFieldType).directSubfield(); + String searchValues = (flatParentFieldType).rewriteValue("foo"); + assertEquals("foo", searchValues); + assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); + + // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field + String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); + String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); + assertEquals("field.bar=foo", searchValuesDocPath); + assertEquals( + new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), + 
dynamicMappedFieldType.termQuery("foo", null) + ); + + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_SUFFIX, new BytesRef("foo"))); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("foo"))); + + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = SortedSetDocValuesField.newSlowRangeQuery( + "field" + VALUE_SUFFIX, + new BytesRef("field.foo"), + new BytesRef("field.foo"), + true, + true + ); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = SortedSetDocValuesField.newSlowRangeQuery( + "field" + VALUE_AND_PATH_SUFFIX, + new BytesRef("field.foo"), + new BytesRef("field.foo"), + true, + true + ); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( "field", null, false, - true, + false, null, Collections.emptyMap() ); @@ -178,7 +414,10 @@ public void testTermQuery() { IllegalArgumentException.class, () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.", + e.getMessage() + ); } public void 
testExistsQuery() { diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index f291b864beb59..d52426c67d256 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; @@ -60,6 +61,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; @@ -100,13 +102,52 @@ public void testIsFieldWithinQuery() throws IOException { ); } + public void testTermQueryCaseInsensitive() { + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = AutomatonQueries.caseInsensitiveTermQuery(new Term("field", BytesRefs.toBytesRef("foo"))); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Term term = new Term("field", "foo"); + + expected = AutomatonQueries.createAutomatonQuery( + term, + AutomatonQueries.toCaseInsensitiveString("foo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQueryCaseInsensitive("foo", null)); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + public void testTermQuery() { MappedFieldType ft = new KeywordFieldType("field"); - assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", null)); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expected = SortedSetDocValuesField.newSlowRangeQuery("field", new BytesRef("foo"), new BytesRef("foo"), true, true); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + 
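// with neither index nor doc_values, query construction must fail +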
MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); } public void testTermQueryWithNormalizer() { diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java index 580f8cccc9af5..d9f0fd6657085 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java @@ -15,11 +15,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -28,6 +30,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.SourceFieldMatchQuery; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.search.MatchQuery; import org.junit.Before; @@ -391,7 +394,7 @@ public void testPhraseQuery() throws IOException { assertThat(q, is(expectedQuery)); Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); - assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); + assertThat(q4, is(new ConstantScoreQuery(new TermQuery(new Term("field", "singleton"))))); Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); expectedQuery = new SourceFieldMatchQuery( @@ -447,4 +450,22 @@ public void testPhraseQuery() throws IOException { ); assertThat(q6, is(expectedQuery)); } + + public void testTermQuery() throws Exception { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", textFieldName); + b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + Query q = new TermQueryBuilder("field", "foo").rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), q); + + q = new TermQueryBuilder("field", "foo").caseInsensitive(true).rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "foo"))), q); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java index 51234fa04ddc2..0170cdde8b21c 100644 --- 
a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java @@ -8,7 +8,11 @@ package org.opensearch.index.mapper; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.TermQuery; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests { @@ -28,4 +32,18 @@ TextFieldMapper.TextFieldType createFieldType(boolean searchable) { ParametrizedFieldMapper.Parameter.metaParam().get() ); } + + @Override + public void testTermQuery() { + MappedFieldType ft = createFieldType(true); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), ft.termQuery("foo", null)); + assertEquals( + new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo"))), + ft.termQueryCaseInsensitive("fOo", null) + ); + + MappedFieldType unsearchable = createFieldType(false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); + assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 19569e1a19284..0268fafdfd246 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2949,6 +2949,52 @@ public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOExcep closeShards(target); } + public void testRestoreSearchOnlyShardFromStore() throws IOException { + // this test indexes docs on a primary, refreshes, then recovers a new Search Replica and asserts + // all docs are present + String remoteStorePath = createTempDir().toString(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test") + .build(); + IndexShard primary = newStartedShard(true, settings, new InternalEngineFactory()); + indexDoc(primary, "_doc", "1"); + indexDoc(primary, "_doc", "2"); + primary.refresh("test"); + assertDocs(primary, "1", "2"); + + ShardRouting searchReplicaShardRouting = TestShardRouting.newShardRouting( + primary.shardId, + randomAlphaOfLength(10), + false, + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + IndexShard replica = newShard(searchReplicaShardRouting, settings, new NRTReplicationEngineFactory()); + recoverShardFromStore(replica); + searchReplicaShardRouting = replica.routingEntry(); + assertDocs(replica, "1", "2"); + assertEquals( + primary.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + + // move to unassigned while the replica is active, then reinit from existing store. 
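+ // the reinitialized search replica should recover from its existing local store rather than an empty store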
+ searchReplicaShardRouting = ShardRoutingHelper.moveToUnassigned( + searchReplicaShardRouting, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so") + ); + searchReplicaShardRouting = ShardRoutingHelper.initialize(searchReplicaShardRouting, replica.routingEntry().currentNodeId()); + assertEquals(RecoverySource.ExistingStoreRecoverySource.INSTANCE, searchReplicaShardRouting.recoverySource()); + replica = reinitShard(replica, searchReplicaShardRouting); + recoverShardFromStore(replica); + assertDocs(replica, "1", "2"); + closeShards(primary, replica); + } + public void testReaderWrapperIsUsed() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 57a561bc8f2a3..4d85a3c491af8 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -12,6 +12,9 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; @@ -20,6 +23,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.CheckpointInfoResponse; @@ -32,6 +36,11 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.test.CorruptionUtils; import org.opensearch.test.junit.annotations.TestLogging; import org.hamcrest.MatcherAssert; @@ -41,6 +50,7 @@ import java.nio.channels.FileChannel; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -55,6 +65,8 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -541,6 +553,81 @@ public void onReplicationFailure( } } + public void testShallowCopySnapshotForClosedIndexSuccessful() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings)) { + final IndexShard primaryShard = shards.getPrimary(); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + shards.flush(); + shards.assertAllEqual(10); 
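+ // stub the repository so snapshotRemoteStoreIndexShard only advances the snapshot state machine to DONE without writing any data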
+ + RepositoriesService repositoriesService = createRepositoriesService(); + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository("random"); + + doAnswer(invocation -> { + IndexShardSnapshotStatus snapshotStatus = invocation.getArgument(5); + long commitGeneration = invocation.getArgument(7); + long startTime = invocation.getArgument(8); + final Map<String, Long> indexFilesToFileLengthMap = invocation.getArgument(9); + ActionListener<String> listener = invocation.getArgument(10); + if (indexFilesToFileLengthMap != null) { + List<String> fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet()); + long indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum(); + int indexTotalNumberOfFiles = fileNames.size(); + snapshotStatus.moveToStarted(startTime, 0, indexTotalNumberOfFiles, 0, indexTotalFileSize); + // Not performing actual snapshot, just modifying the state + snapshotStatus.moveToFinalize(commitGeneration); + snapshotStatus.moveToDone(System.currentTimeMillis(), snapshotStatus.generation()); + listener.onResponse(snapshotStatus.generation()); + return null; + } + listener.onResponse(snapshotStatus.generation()); + return null; + }).when(repository) + .snapshotRemoteStoreIndexShard(any(), any(), any(), any(), any(), any(), anyLong(), anyLong(), anyLong(), any(), any()); + + final SnapshotShardsService shardsService = getSnapshotShardsService( + primaryShard, + shards.getIndexMetadata(), + true, + repositoriesService + ); + final Snapshot snapshot1 = new Snapshot( + randomAlphaOfLength(10), + new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5)) + ); + + // Initialize the shallow copy snapshot + final ClusterState initState = addSnapshotIndex( + clusterService.state(), + snapshot1, + primaryShard, + SnapshotsInProgress.State.INIT, + true + ); + shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); + + // Start the snapshot + shardsService.clusterChanged( + new ClusterChangedEvent( + "test", + addSnapshotIndex(clusterService.state(), snapshot1, primaryShard, SnapshotsInProgress.State.STARTED, true), + initState + ) + ); + + // Check the snapshot got completed successfully + assertBusy(() -> { + final IndexShardSnapshotStatus.Copy copy = shardsService.currentSnapshotShards(snapshot1) + .get(primaryShard.shardId) + .asCopy(); + final IndexShardSnapshotStatus.Stage stage = copy.getStage(); + assertEquals(IndexShardSnapshotStatus.Stage.DONE, stage); + }); + } + } + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { return new RemoteStoreReplicationSource(shard) { @Override diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 2311fc582616f..f4f94baabd7b0 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -68,6 +68,7 @@ import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfoTests; @@ -892,10 +893,21 @@ public void
testSnapshotWhileFailoverIncomplete() throws Exception { replicateSegments(primaryShard, shards.getReplicas()); shards.assertAllEqual(10); - final SnapshotShardsService shardsService = getSnapshotShardsService(replicaShard); + final SnapshotShardsService shardsService = getSnapshotShardsService( + replicaShard, + shards.getIndexMetadata(), + false, + createRepositoriesService() + ); final Snapshot snapshot = new Snapshot(randomAlphaOfLength(10), new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5))); - final ClusterState initState = addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.INIT); + final ClusterState initState = addSnapshotIndex( + clusterService.state(), + snapshot, + replicaShard, + SnapshotsInProgress.State.INIT, + false + ); shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); CountDownLatch latch = new CountDownLatch(1); @@ -907,7 +919,7 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { shardsService.clusterChanged( new ClusterChangedEvent( "test", - addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED), + addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED, false), initState ) ); @@ -956,21 +968,30 @@ public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() th } } - private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { + protected SnapshotShardsService getSnapshotShardsService( + IndexShard indexShard, + IndexMetadata indexMetadata, + boolean closedIdx, + RepositoriesService repositoriesService + ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(threadPool); final IndicesService indicesService = mock(IndicesService.class); final IndexService indexService = mock(IndexService.class); when(indicesService.indexServiceSafe(any())).thenReturn(indexService); - when(indexService.getShardOrNull(anyInt())).thenReturn(replicaShard); - return new SnapshotShardsService(settings, clusterService, createRepositoriesService(), transportService, indicesService); + when(indexService.getShardOrNull(anyInt())).thenReturn(indexShard); + when(indexService.getMetadata()).thenReturn( + new IndexMetadata.Builder(indexMetadata).state(closedIdx ? 
IndexMetadata.State.CLOSE : IndexMetadata.State.OPEN).build() + ); + return new SnapshotShardsService(settings, clusterService, repositoriesService, transportService, indicesService); } - private ClusterState addSnapshotIndex( + protected ClusterState addSnapshotIndex( ClusterState state, Snapshot snapshot, IndexShard shard, - SnapshotsInProgress.State snapshotState + SnapshotsInProgress.State snapshotState, + boolean shallowCopySnapshot ) { final Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardsBuilder = new HashMap<>(); ShardRouting shardRouting = shard.shardRouting; @@ -991,7 +1012,7 @@ private ClusterState addSnapshotIndex( null, SnapshotInfoTests.randomUserMetadata(), VersionUtils.randomVersion(random()), - false + shallowCopySnapshot ); return ClusterState.builder(state) .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(Collections.singletonList(entry))) diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index 12c7dc870c104..76294d85c64d4 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -44,6 +44,7 @@ import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; @@ -55,6 +56,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class PluginInfoTests extends OpenSearchTestCase { @@ -281,6 +283,30 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception { assertThat(e.getMessage(), containsString("property [classname] is missing")); } + public void testExtendedPluginsSingleOptionalExtension() throws IOException { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin", + "extended.plugins", + "foo;optional=true" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(true)); + } + public void testExtendedPluginsSingleExtension() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties( @@ -302,6 +328,7 @@ public void testExtendedPluginsSingleExtension() throws Exception { ); PluginInfo info = PluginInfo.readFromProperties(pluginDir); assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(false)); } public void testExtendedPluginsMultipleExtensions() throws Exception { diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index 3c40f9190ea8b..46ded8af3ee9a 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -361,7 +361,7 @@ public void testSortBundlesNoDeps() throws Exception { assertThat(sortedBundles, Matchers.contains(bundle1, bundle2, bundle3)); } - public void testSortBundlesMissingDep() throws Exception { +
public void testSortBundlesMissingRequiredDep() throws Exception { Path pluginDir = createTempDir(); PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", "MyPlugin", Collections.singletonList("dne"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); @@ -372,6 +372,33 @@ assertEquals("Missing plugin [dne], dependency of [foo]", e.getMessage()); } + public void testSortBundlesMissingOptionalDep() throws Exception { + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(PluginsService.class))) { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "[.test] warning", + "org.opensearch.plugins.PluginsService", + Level.WARN, + "Missing plugin [dne], dependency of [foo]" + ) + ); + Path pluginDir = createTempDir(); + PluginInfo info = new PluginInfo( + "foo", + "desc", + "1.0", + Version.CURRENT, + "1.8", + "MyPlugin", + Collections.singletonList("dne;optional=true"), + false + ); + PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); + PluginsService.sortBundles(Collections.singleton(bundle)); + mockLogAppender.assertAllExpectationsMatched(); + } + } + public void testSortBundlesCommonDep() throws Exception { Path pluginDir = createTempDir(); Set<PluginsService.Bundle> bundles = new LinkedHashSet<>(); // control iteration order diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 71460d6248c9e..2415b917338e8 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -779,7 +779,9 @@ public void snapshotRemoteStoreIndexShard( String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, long primaryTerm, + long commitGeneration, long startTime, + Map<String, Long> indexFilesToFileLengthMap, ActionListener<String> listener ) { diff --git a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java index 948d2cffceabe..e011dd0bcf6c0 100644 --- a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java +++ b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.Numbers; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.fielddata.AbstractBinaryDocValues; @@ -52,9 +53,13 @@ import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; @@ -776,6 +781,96 @@ public int docValueCount() { verifySortedSet(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs)); } + public void testSingleValuedUnsignedLongs() throws Exception { + final int numDocs = scaledRandomIntBetween(1, 100); + final
long[] array = new long[numDocs]; + final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs); + for (int i = 0; i < array.length; ++i) { + if (randomBoolean()) { + array[i] = randomUnsignedLong().longValue(); + if (docsWithValue != null) { + docsWithValue.set(i); + } + } else if (docsWithValue != null && randomBoolean()) { + docsWithValue.set(i); + } + } + + final Supplier<SortedNumericUnsignedLongValues> multiValues = () -> new SortedNumericUnsignedLongValues() { + int docId = -1; + + @Override + public boolean advanceExact(int target) throws IOException { + this.docId = target; + return docsWithValue == null || docsWithValue.get(docId); + } + + @Override + public int docID() { + return docId; + } + + @Override + public long nextValue() { + return array[docId]; + } + + @Override + public int docValueCount() { + return 1; + } + }; + verifySortedUnsignedLong(multiValues, numDocs); + final FixedBitSet rootDocs = randomRootDocs(numDocs); + final FixedBitSet innerDocs = randomInnerDocs(rootDocs); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, Integer.MAX_VALUE); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs)); + } + + public void testMultiValuedUnsignedLongs() throws Exception { + final int numDocs = scaledRandomIntBetween(1, 100); + final long[][] array = new long[numDocs][]; + for (int i = 0; i < numDocs; ++i) { + final long[] values = new long[randomInt(4)]; + for (int j = 0; j < values.length; ++j) { + values[j] = randomUnsignedLong().longValue(); + } + Arrays.sort(values); + array[i] = values; + } + final Supplier<SortedNumericUnsignedLongValues> multiValues = () -> new SortedNumericUnsignedLongValues() { + int doc; + int i; + + @Override + public long nextValue() { + return array[doc][i++]; + } + + @Override + public boolean advanceExact(int doc) { + this.doc = doc; + i = 0; + return array[doc].length > 0; + } + + @Override + public int docValueCount() { + return array[doc].length; + } + + @Override + public int docID() { + return doc; + } + }; + verifySortedUnsignedLong(multiValues, numDocs); + final FixedBitSet rootDocs = randomRootDocs(numDocs); + final FixedBitSet innerDocs = randomInnerDocs(rootDocs); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, Integer.MAX_VALUE); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs)); + } + private void verifySortedSet(Supplier<SortedSetDocValues> supplier, int maxDoc) throws IOException { for (MultiValueMode mode : new MultiValueMode[] { MultiValueMode.MIN, MultiValueMode.MAX }) { SortedSetDocValues values = supplier.get(); @@ -857,6 +952,141 @@ private void verifySortedSet( } } + private void verifySortedUnsignedLong(Supplier<SortedNumericUnsignedLongValues> supplier, int maxDoc) throws IOException { + for (MultiValueMode mode : MultiValueMode.values()) { + SortedNumericUnsignedLongValues values = supplier.get(); + final NumericDocValues selected = mode.select(values); + for (int i = 0; i < maxDoc; ++i) { + Long actual = null; + if (selected.advanceExact(i)) { + actual = selected.longValue(); + verifyLongValueCanCalledMoreThanOnce(selected, actual); + } + + BigInteger expected = null; + if (values.advanceExact(i)) { + int numValues = values.docValueCount(); + if (mode == MultiValueMode.MAX) { + expected = Numbers.MIN_UNSIGNED_LONG_VALUE; + } else if (mode == MultiValueMode.MIN) { + expected = Numbers.MAX_UNSIGNED_LONG_VALUE; + } else { + expected = BigInteger.ZERO; + } + for (int j = 0; j < numValues; ++j) { + if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) { + expected = expected.add(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MIN) { + expected = expected.min(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MAX) { + expected = expected.max(Numbers.toUnsignedBigInteger(values.nextValue())); + } + } + if (mode == MultiValueMode.AVG) { + expected = Numbers.toUnsignedBigInteger(expected.longValue()); + expected = numValues > 1 + ? new BigDecimal(expected).divide(new BigDecimal(numValues), RoundingMode.HALF_UP).toBigInteger() + : expected; + } else if (mode == MultiValueMode.MEDIAN) { + final Long[] docValues = new Long[numValues]; + for (int j = 0; j < numValues; ++j) { + docValues[j] = values.nextValue(); + } + Arrays.sort(docValues, Long::compareUnsigned); + int value = numValues / 2; + if (numValues % 2 == 0) { + expected = Numbers.toUnsignedBigInteger(docValues[value - 1]) + .add(Numbers.toUnsignedBigInteger(docValues[value])); + expected = Numbers.toUnsignedBigInteger(expected.longValue()); + expected = new BigDecimal(expected).divide(new BigDecimal(2), RoundingMode.HALF_UP).toBigInteger(); + } else { + expected = Numbers.toUnsignedBigInteger(docValues[value]); + } + } + } + + final Long expectedLong = expected == null ? null : expected.longValue(); + assertEquals(mode.toString() + " docId=" + i, expectedLong, actual); + } + } + } + + private void verifySortedUnsignedLong( + Supplier<SortedNumericUnsignedLongValues> supplier, + int maxDoc, + FixedBitSet rootDocs, + FixedBitSet innerDocs, + int maxChildren + ) throws IOException { + for (long missingValue : new long[] { 0, randomUnsignedLong().longValue() }) { + for (MultiValueMode mode : new MultiValueMode[] { + MultiValueMode.MIN, + MultiValueMode.MAX, + MultiValueMode.SUM, + MultiValueMode.AVG }) { + SortedNumericUnsignedLongValues values = supplier.get(); + final NumericDocValues selected = mode.select( + values, + missingValue, + rootDocs, + new BitSetIterator(innerDocs, 0L), + maxDoc, + maxChildren + ); + int prevRoot = -1; + for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? rootDocs.nextSetBit(root + 1) : -1) { + assertTrue(selected.advanceExact(root)); + final long actual = selected.longValue(); + verifyLongValueCanCalledMoreThanOnce(selected, actual); + + BigInteger expected = BigInteger.ZERO; + if (mode == MultiValueMode.MAX) { + expected = Numbers.MIN_UNSIGNED_LONG_VALUE; + } else if (mode == MultiValueMode.MIN) { + expected = Numbers.MAX_UNSIGNED_LONG_VALUE; + } + int numValues = 0; + int count = 0; + for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit( + child + 1 + )) { + if (values.advanceExact(child)) { + if (++count > maxChildren) { + break; + } + for (int j = 0; j < values.docValueCount(); ++j) { + if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) { + expected = expected.add(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MIN) { + expected = expected.min(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MAX) { + expected = expected.max(Numbers.toUnsignedBigInteger(values.nextValue())); + } + ++numValues; + } + } + } + final long expectedLong; + if (numValues == 0) { + expectedLong = missingValue; + } else if (mode == MultiValueMode.AVG) { + expected = Numbers.toUnsignedBigInteger(expected.longValue()); + expected = numValues > 1 + ?
new BigDecimal(expected).divide(new BigDecimal(numValues), RoundingMode.HALF_UP).toBigInteger() + : expected; + expectedLong = expected.longValue(); + } else { + expectedLong = expected.longValue(); + } + + assertEquals(mode.toString() + " docId=" + root, expectedLong, actual); + + prevRoot = root; + } + } + } + } + public void testValidOrdinals() { assertThat(MultiValueMode.SUM.ordinal(), equalTo(0)); assertThat(MultiValueMode.AVG.ordinal(), equalTo(1)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java index b2025ae5f03c1..e7c1de0123c9e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java @@ -57,7 +57,7 @@ public class AdjacencyMatrixAggregationBuilderTests extends OpenSearchTestCase { public void testFilterSizeLimitation() throws Exception { - // filter size grater than max size should thrown a exception + // filter size greater than max size should throw an exception QueryShardContext queryShardContext = mock(QueryShardContext.class); IndexShard indexShard = mock(IndexShard.class); Settings settings = Settings.builder() @@ -94,7 +94,7 @@ public void testFilterSizeLimitation() throws Exception { ) ); - // filter size not grater than max size should return an instance of AdjacencyMatrixAggregatorFactory + // filter size not greater than max size should return an instance of AdjacencyMatrixAggregatorFactory Map<String, QueryBuilder> emptyFilters = Collections.emptyMap(); AdjacencyMatrixAggregationBuilder aggregationBuilder = new AdjacencyMatrixAggregationBuilder("dummy", emptyFilters); @@ -106,4 +106,21 @@ public void testFilterSizeLimitation() throws Exception { + "removed in a future release! See the breaking changes documentation for the next major version."
); } + + public void testShowOnlyIntersecting() throws Exception { + QueryShardContext queryShardContext = mock(QueryShardContext.class); + + Map<String, QueryBuilder> filters = new HashMap<>(3); + for (int i = 0; i < 2; i++) { + QueryBuilder queryBuilder = mock(QueryBuilder.class); + // return builder itself to skip rewrite + when(queryBuilder.rewrite(queryShardContext)).thenReturn(queryBuilder); + filters.put("filter" + i, queryBuilder); + } + AdjacencyMatrixAggregationBuilder builder = new AdjacencyMatrixAggregationBuilder("dummy", filters, true); + assertTrue(builder.isShowOnlyIntersecting()); + + builder = new AdjacencyMatrixAggregationBuilder("dummy", filters, false); + assertFalse(builder.isShowOnlyIntersecting()); + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java index c5cf56f6caff7..38e53d65a69e6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java @@ -68,4 +68,22 @@ public void testFiltersSameMap() { assertEquals(original, builder.filters()); assert original != builder.filters(); } + + public void testShowOnlyIntersecting() { + Map<String, QueryBuilder> original = new HashMap<>(); + original.put("bbb", new MatchNoneQueryBuilder()); + original.put("aaa", new MatchNoneQueryBuilder()); + AdjacencyMatrixAggregationBuilder builder; + builder = new AdjacencyMatrixAggregationBuilder("my-agg", "&", original, true); + assertTrue(builder.isShowOnlyIntersecting()); + } + + public void testShowOnlyIntersectingAsFalse() { + Map<String, QueryBuilder> original = new HashMap<>(); + original.put("bbb", new MatchNoneQueryBuilder()); + original.put("aaa", new MatchNoneQueryBuilder()); + AdjacencyMatrixAggregationBuilder builder; + builder = new AdjacencyMatrixAggregationBuilder("my-agg", original, false); + assertFalse(builder.isShowOnlyIntersecting()); + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 0327bd9990784..98af025563004 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -28,18 +28,27 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.codec.composite.composite912.Composite912Codec; import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import
org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -49,14 +58,17 @@ import org.opensearch.search.aggregations.metrics.InternalSum; import org.opensearch.search.aggregations.metrics.InternalValueCount; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.aggregations.metrics.MinAggregationBuilder; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Random; @@ -69,6 +81,8 @@ import static org.opensearch.search.aggregations.AggregationBuilders.min; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class MetricAggregatorTests extends AggregatorTestCase { @@ -267,6 +281,110 @@ public void testStarTreeDocValues() throws IOException { ); } + CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + + QueryShardContext queryShardContext = queryShardContextMock( + indexSearcher, + mapperServiceMock(), + createIndexSettings(), + circuitBreakerService, + new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking() + ); + + MetricAggregatorFactory aggregatorFactory = mock(MetricAggregatorFactory.class); + when(aggregatorFactory.getSubFactories()).thenReturn(AggregatorFactories.EMPTY); + when(aggregatorFactory.getField()).thenReturn(FIELD_NAME); + when(aggregatorFactory.getMetricStat()).thenReturn(MetricStat.SUM); + + // Case when field and metric type in aggregation are fully supported by star tree. + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + true + ); + + // Case when the field is not supported by star tree + SumAggregationBuilder invalidFieldSumAggBuilder = sum("_name").field("hello"); + testCase( + indexSearcher, + query, + queryBuilder, + invalidFieldSumAggBuilder, + starTree, + supportedDimensions, + Collections.emptyList(), + verifyAggregation(InternalSum::getValue), + invalidFieldSumAggBuilder.build(queryShardContext, null), + false // Invalid fields will return null StarTreeQueryContext which will not cause early termination by leaf collector + ); + + // Case when metric type in aggregation is not supported by star tree but the field is supported. 
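+ // (the mocked MetricAggregatorFactory requests MetricStat.SUM, which is absent from the supported metrics list below, so the star-tree cannot serve the aggregation and no collector early termination is asserted)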
+ testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when field is not present in supported metrics + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { mock(MetricAggregatorFactory.class) }); + when(aggregatorFactory.getSubFactories()).thenReturn(aggregatorFactories); + + // Case when sub aggregations are present + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when aggregation factory is not metric aggregation + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + mock(ValuesSourceAggregatorFactory.class), + false + ); + ir.close(); directory.close(); } @@ -287,6 +405,21 @@ private void testC CompositeIndexFieldInfo starTree, List<Dimension> supportedDimensions, BiConsumer<V, V> verify + ) throws IOException { + testCase(searcher, query, queryBuilder, aggBuilder, starTree, supportedDimensions, Collections.emptyList(), verify, null, true); + } + + private <T extends AggregationBuilder, V extends InternalAggregation> void testCase( + IndexSearcher searcher, + Query query, + QueryBuilder queryBuilder, + T aggBuilder, + CompositeIndexFieldInfo starTree, + List<Dimension> supportedDimensions, + List<Metric> supportedMetrics, + BiConsumer<V, V> verify, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination ) throws IOException { V starTreeAggregation = searchAndReduceStarTree( createIndexSettings(), @@ -296,8 +429,11 @@ private void testC aggBuilder, starTree, supportedDimensions, + supportedMetrics, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); V expectedAggregation = searchAndReduceStarTree( @@ -308,8 +444,11 @@ private void testC aggBuilder, null, null, + null, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); verify.accept(expectedAggregation, starTreeAggregation); diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index f8eb71a40319a..6a3eb9f8bb1f8 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -87,7 +87,8 @@ public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOExcepti testStarTreeFilter(10, false); } - private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + private Directory createStarTreeIndex(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension, List<Document> docs) + throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); conf.setCodec(getCodec(maxLeafDoc, skipStarNodeCreationForSDVDimension)); @@ -95,7 +96,6 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); int totalDocs = 100; - List<Document> docs = new ArrayList<>(); for (int i = 0; i < totalDocs; i++) { Document doc = new Document(); doc.add(new SortedNumericDocValuesField(SNDV, i)); @@ -110,6 +110,15 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS } iw.forceMerge(1); iw.close(); + return directory; + } + + private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + List<Document> docs = new ArrayList<>(); + + Directory directory = createStarTreeIndex(maxLeafDoc, skipStarNodeCreationForSDVDimension, docs); + + int totalDocs = docs.size(); DirectoryReader ir = DirectoryReader.open(directory); initValuesSourceRegistry(); diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index b4726bab50198..23c21648b1263 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -156,7 +156,6 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); - sizes.put(ThreadPool.Names.REMOTE_STATE_READ, n -> ThreadPool.boundedBy(4 * n, 4, 32)); return sizes.get(threadPoolName).apply(numberOfProcessors); } diff --git a/server/src/test/java/org/opensearch/transport/PublishPortTests.java b/server/src/test/java/org/opensearch/transport/PublishPortTests.java index 6a41409f6f181..2e5a57c4cdd60 100644 --- a/server/src/test/java/org/opensearch/transport/PublishPortTests.java +++ b/server/src/test/java/org/opensearch/transport/PublishPortTests.java @@ -43,8 +43,6 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.transport.TcpTransport.resolvePublishPort; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class PublishPortTests extends OpenSearchTestCase { @@ -73,48 +71,44 @@ public void testPublishPort() throws Exception { } - int publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(settings, profile), + int publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(settings, profile).publishPort, randomAddresses(), getByName("127.0.0.2") ); assertThat("Publish port should be explicitly set", publishPort, equalTo(useProfile ?
9080 : 9081)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort)); - try { - resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), - asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), - getByName("127.0.0.3") - ); - fail("Expected BindTransportException as publish_port not specified and non-unique port of bound addresses"); - } catch (BindTransportException e) { - assertThat(e.getMessage(), containsString("Failed to auto-resolve publish port")); - } + int resPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, + asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), + getByName("127.0.0.3") + ); + assertThat("Publish port should resolve to -1 as publish_port is not specified and bound addresses have no unique port", resPort, equalTo(-1)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort)); if (NetworkUtils.SUPPORTS_V6) { - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") ); diff --git a/settings.gradle b/settings.gradle index 035fe69eda7e9..a24da40069b90 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.develocity" version "3.18.2" + id "com.gradle.develocity" version "3.19" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/external-modules/build.gradle b/test/external-modules/build.gradle index 8e59c309826e7..e575323b6248c 100644 --- a/test/external-modules/build.gradle +++ b/test/external-modules/build.gradle @@ -17,9 +17,9 @@ subprojects { apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name it.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = it.name + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } tasks.named('yamlRestTest').configure { diff --git a/test/external-modules/delayed-aggs/build.gradle b/test/external-modules/delayed-aggs/build.gradle index d470269c8a6e2..a7662f72e64e6 100644 ---
a/test/external-modules/delayed-aggs/build.gradle +++ b/test/external-modules/delayed-aggs/build.gradle @@ -29,8 +29,8 @@ */ opensearchplugin { - description 'A test module that allows to delay aggregations on shards with a configurable time' - classname 'org.opensearch.search.aggregations.DelayedShardAggregationPlugin' + description = 'A test module that allows to delay aggregations on shards with a configurable time' + classname = 'org.opensearch.search.aggregations.DelayedShardAggregationPlugin' } restResources { diff --git a/test/fixtures/azure-fixture/build.gradle b/test/fixtures/azure-fixture/build.gradle index e2b1d475fbab7..904297a3b4c65 100644 --- a/test/fixtures/azure-fixture/build.gradle +++ b/test/fixtures/azure-fixture/build.gradle @@ -46,7 +46,7 @@ preProcessFixture { } doLast { file("${testFixturesDir}/shared").mkdirs() - project.copy { + copy { from jar from configurations.runtimeClasspath into "${testFixturesDir}/shared" diff --git a/test/fixtures/gcs-fixture/build.gradle b/test/fixtures/gcs-fixture/build.gradle index 564cf33687436..60f672e6bd00b 100644 --- a/test/fixtures/gcs-fixture/build.gradle +++ b/test/fixtures/gcs-fixture/build.gradle @@ -46,7 +46,7 @@ preProcessFixture { } doLast { file("${testFixturesDir}/shared").mkdirs() - project.copy { + copy { from jar from configurations.runtimeClasspath into "${testFixturesDir}/shared" diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index de533afd7342a..2166251d74850 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -71,14 +71,14 @@ dependencies { api "org.eclipse.jetty:jetty-server:${versions.jetty}" api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" api 'org.apache.zookeeper:zookeeper:3.9.3' - api "org.apache.commons:commons-text:1.12.0" + api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.12" - api "ch.qos.logback:logback-classic:1.5.12" + api "ch.qos.logback:logback-core:1.5.16" + api "ch.qos.logback:logback-classic:1.5.15" api "org.jboss.xnio:xnio-nio:3.8.16.Final" - api 'org.jline:jline:3.27.1' + api 'org.jline:jline:3.28.0' api 'org.apache.commons:commons-configuration2:2.11.0' - api 'com.nimbusds:nimbus-jose-jwt:9.46' + api 'com.nimbusds:nimbus-jose-jwt:10.0.1' api ('org.apache.kerby:kerb-admin:2.1.0') { exclude group: "org.jboss.xnio" exclude group: "org.jline" diff --git a/test/fixtures/s3-fixture/build.gradle b/test/fixtures/s3-fixture/build.gradle index 86456b3364c4c..519e8514af4d4 100644 --- a/test/fixtures/s3-fixture/build.gradle +++ b/test/fixtures/s3-fixture/build.gradle @@ -46,7 +46,7 @@ preProcessFixture { } doLast { file("${testFixturesDir}/shared").mkdirs() - project.copy { + copy { from jar from configurations.runtimeClasspath into "${testFixturesDir}/shared" diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java index 9a000a4eeda72..a6af658be2ca1 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java @@ -342,4 +342,26 @@ public static ShardRouting newShardRouting( -1 ); } + + public static ShardRouting newShardRouting( + ShardId shardId, + String currentNodeId, + boolean primary, + boolean searchOnly, + ShardRoutingState state, + RecoverySource recoverySource + 
) { + return new ShardRouting( + shardId, + currentNodeId, + null, + primary, + searchOnly, + state, + recoverySource, + buildUnassignedInfo(state), + buildAllocationId(state), + -1 + ); + } } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index a5dc13c334513..062ebd2051f6e 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -289,6 +289,10 @@ protected EngineConfigFactory getEngineConfigFactory(IndexSettings indexSettings return new EngineConfigFactory(indexSettings); } + public IndexMetadata getIndexMetadata() { + return indexMetadata; + } + public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName()).id(Integer.toString(docId.incrementAndGet())) diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index e1728c4476699..27142b298db52 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -93,6 +93,7 @@ import org.opensearch.index.cache.query.DisabledQueryCache; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -348,7 +349,9 @@ protected CountingAggregator createCountingAggregator( IndexSettings indexSettings, CompositeIndexFieldInfo starTree, List<Dimension> supportedDimensions, + List<Metric> supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... fieldTypes ) throws IOException { SearchContext searchContext; @@ -360,7 +363,9 @@ protected CountingAggregator createCountingAggregator( queryBuilder, starTree, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); } else { @@ -389,7 +394,9 @@ protected SearchContext createSearchContextWithStarTreeContext( QueryBuilder queryBuilder, CompositeIndexFieldInfo starTree, List<Dimension> supportedDimensions, + List<Metric> supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... fieldTypes ) throws IOException { SearchContext searchContext = createSearchContext( @@ -406,7 +413,12 @@ protected SearchContext createSearchContextWithStarTreeContext( AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); when(searchContext.aggregations()).thenReturn(searchContextAggregations); when(searchContextAggregations.factories()).thenReturn(aggregatorFactories); - when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + + if (aggregatorFactory != null) { + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { aggregatorFactory }); + } else { + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + } CompositeDataCubeFieldType compositeMappedFieldType = mock(CompositeDataCubeFieldType.class); when(compositeMappedFieldType.name()).thenReturn(starTree.getField()); @@ -414,6 +426,7 @@ protected SearchContext createSearchContextWithStarTreeContext( Set<CompositeMappedFieldType> compositeFieldTypes = Set.of(compositeMappedFieldType); when((compositeMappedFieldType).getDimensions()).thenReturn(supportedDimensions); + when((compositeMappedFieldType).getMetrics()).thenReturn(supportedMetrics); MapperService mapperService = mock(MapperService.class); when(mapperService.getCompositeFieldTypes()).thenReturn(compositeFieldTypes); when(searchContext.mapperService()).thenReturn(mapperService); @@ -740,8 +753,11 @@ protected A searchAndReduc AggregationBuilder builder, CompositeIndexFieldInfo compositeIndexFieldInfo, List<Dimension> supportedDimensions, + List<Metric> supportedMetrics, int maxBucket, boolean hasNested, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination, MappedFieldType... fieldTypes ) throws IOException { query = query.rewrite(searcher); @@ -764,7 +780,9 @@ protected A searchAndReduc indexSettings, compositeIndexFieldInfo, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); @@ -772,7 +790,7 @@ protected A searchAndReduc searcher.search(query, countingAggregator); countingAggregator.postCollection(); aggs.add(countingAggregator.buildTopLevel()); - if (compositeIndexFieldInfo != null) { + if (compositeIndexFieldInfo != null && assertCollectorEarlyTermination) { assertEquals(0, countingAggregator.collectCounter.get()); } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 644ea33e8b89f..0c68108434110 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2311,10 +2311,24 @@ public List<String> startNodes(int numOfNodes, Settings settings) { return startNodes(Collections.nCopies(numOfNodes, settings).toArray(new Settings[0])); } + /** + * Starts multiple nodes with the given settings and returns their names + */ + public List<String> startNodes(int numOfNodes, Settings settings, Boolean waitForNodeJoin) { + return startNodes(waitForNodeJoin, Collections.nCopies(numOfNodes, settings).toArray(new Settings[0])); + } + /** * Starts multiple nodes with the given settings and returns their names */ public synchronized List<String> startNodes(Settings... extraSettings) { + return startNodes(false, extraSettings); + } + + /** + * Starts multiple nodes with the given settings and returns their names + */ + public synchronized List<String> startNodes(Boolean waitForNodeJoin, Settings... extraSettings) { final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isClusterManagerNode).count()); final int defaultMinClusterManagerNodes; if (autoManageClusterManagerNodes) { @@ -2366,7 +2380,7 @@ public synchronized List<String> startNodes(Settings... extraSettings) { nodes.add(nodeAndClient); } startAndPublishNodesAndClients(nodes); - if (autoManageClusterManagerNodes) { + if (autoManageClusterManagerNodes && !waitForNodeJoin) { validateClusterFormed(); } return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList()); @@ -2411,6 +2425,10 @@ public List<String> startDataOnlyNodes(int numNodes, Settings settings) { return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build()); } + public List<String> startDataOnlyNodes(int numNodes, Settings settings, Boolean ignoreNodeJoin) { + return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build(), ignoreNodeJoin); + } + public List<String> startSearchOnlyNodes(int numNodes) { return startSearchOnlyNodes(numNodes, Settings.EMPTY); } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index d84c60989948f..4a7570c8a50ce 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -221,6 +221,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import reactor.util.annotation.NonNull; + import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.unit.TimeValue.timeValueMillis; @@ -2958,6 +2960,43 @@ protected static Settings buildRemoteStoreNodeAttributes( return settings.build(); } + protected Settings buildRemotePublicationNodeAttributes( + @NonNull String remoteStateRepoName, + @NonNull String remoteStateRepoType, + @NonNull String routingTableRepoName, + @NonNull String routingTableRepoType + ) { + String remoteStateRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + remoteStateRepoName + ); + String routingTableRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + routingTableRepoName + ); + String remoteStateRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + remoteStateRepoName + ); + String routingTableRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + routingTableRepoName + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, remoteStateRepoName) + .put("node.attr."
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) + .put(remoteStateRepositoryTypeAttributeKey, remoteStateRepoType) + .put(routingTableRepositoryTypeAttributeKey, routingTableRepoType) + .put(remoteStateRepositorySettingsAttributeKeyPrefix + "location", randomRepoPath().toAbsolutePath()) + .put(routingTableRepositorySettingsAttributeKeyPrefix + "location", randomRepoPath().toAbsolutePath()) + .build(); + } + public static String resolvePath(IndexId indexId, String shardId) { PathType pathType = PathType.fromCode(indexId.getShardPathType()); RemoteStorePathStrategy.SnapshotShardPathInput shardPathInput = new RemoteStorePathStrategy.SnapshotShardPathInput.Builder()