From 09ad8b14a37a59a9846db0c38947b570690463be Mon Sep 17 00:00:00 2001 From: meetvm Date: Fri, 20 Dec 2024 16:01:42 +0530 Subject: [PATCH] Fix Issue 16890: Hide stack trace during translog transfer Signed-off-by: meetvm Bump com.nimbusds:oauth2-oidc-sdk from 11.19.1 to 11.20.1 in /plugins/repository-azure (#16895) * Bump com.nimbusds:oauth2-oidc-sdk in /plugins/repository-azure Bumps [com.nimbusds:oauth2-oidc-sdk](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions) from 11.19.1 to 11.20.1. - [Changelog](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/branches/compare/11.20.1..11.19.1) --- updated-dependencies: - dependency-name: com.nimbusds:oauth2-oidc-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Bump com.netflix.nebula.ospackage-base from 11.10.0 to 11.10.1 in /distribution/packages (#16896) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.10.0 to 11.10.1. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Bump ch.qos.logback:logback-classic from 1.5.12 to 1.5.15 in /test/fixtures/hdfs-fixture (#16898) * Bump ch.qos.logback:logback-classic in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-classic](https://github.com/qos-ch/logback) from 1.5.12 to 1.5.15. - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.12...v_1.5.15) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-classic dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Bump lycheeverse/lychee-action from 2.1.0 to 2.2.0 (#16897) * Bump lycheeverse/lychee-action from 2.1.0 to 2.2.0 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 2.1.0 to 2.2.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v2.1.0...v2.2.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Create sub directories for ThirdPartyAudit dependency metadata (#16844) * Extract jars to sub dirs during thirdPartyAudit task. Signed-off-by: Finn Carroll * Change regex to split on '-'/'.'.
Ignore version. Signed-off-by: Finn Carroll * Split on .jar for sub folder prefix. Signed-off-by: Finn Carroll --------- Signed-off-by: Finn Carroll Retrieve value from DocValues in a flat_object field (#16802) Bump com.microsoft.azure:msal4j from 1.17.2 to 1.18.0 in /plugins/repository-azure (#16918) * Bump com.microsoft.azure:msal4j in /plugins/repository-azure Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.17.2 to 1.18.0. - [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases) - [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt) - [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/compare/v1.17.2...v1.18.0) --- updated-dependencies: - dependency-name: com.microsoft.azure:msal4j dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Bump org.apache.commons:commons-text from 1.12.0 to 1.13.0 in /test/fixtures/hdfs-fixture (#16919) * Bump org.apache.commons:commons-text in /test/fixtures/hdfs-fixture Bumps org.apache.commons:commons-text from 1.12.0 to 1.13.0. --- updated-dependencies: - dependency-name: org.apache.commons:commons-text dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Add gRPC server as transport-grpc plugin (#16534) Introduce auxiliary transport to NetworkPlugin and add gRPC plugin. Auxiliary transports are optional lifecycle components provided by network plugins which run in parallel to the http server/native transport. They are distinct from the existing NetworkPlugin interfaces of 'getTransports' and 'getHttpTransports' as auxiliary transports are optional. Each AuxTransport implements its own 'aux.transport.type' and 'aux.transport..ports' setting. Since Security.java initializes prior to Node.java during bootstrap, socket binding permissions are granted based on 'aux.transport..ports' for each enabled 'aux.transport.type', falling back to a default if no ports are specified.
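For illustration, a minimal sketch of that settings fallback. The setting key shape, the transport type name, and the default range below are assumptions for the example, not the merged API:

```java
import org.opensearch.common.settings.Settings;

// Sketch only: resolve the port range that bootstrap security could grant
// socket-bind permission for, per enabled aux transport type, falling back
// to a default when no ports are configured for that type.
public final class AuxTransportPortsSketch {
    // Hypothetical default range for the example; the real default lives in core.
    static final String DEFAULT_AUX_PORT_RANGE = "9400-9500";

    static String resolvePortRange(Settings settings, String auxTransportType) {
        // e.g. key: aux.transport.my-grpc.ports (key shape assumed for illustration)
        return settings.get("aux.transport." + auxTransportType + ".ports", DEFAULT_AUX_PORT_RANGE);
    }

    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("aux.transport.my-grpc.ports", "9500-9600")
            .build();
        System.out.println(resolvePortRange(settings, "my-grpc"));    // configured: 9500-9600
        System.out.println(resolvePortRange(settings, "other-type")); // default: 9400-9500
    }
}
```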
Signed-off-by: Finn Carroll Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods (#16923) * Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Workflow benchmark-pull-request.yml fix (#16925) Signed-off-by: Prudhvi Godithi Add benchmark confirm for lucene-10 big5 index snapshot (#16940) Signed-off-by: Rishabh Singh Remove duplicate DCO check (#16942) Signed-off-by: Andriy Redko Allow extended plugins to be optional (#16909) * Make extended plugins optional Signed-off-by: Craig Perkins * Make extended plugins optional Signed-off-by: Craig Perkins * Load extensions for classpath plugins Signed-off-by: Craig Perkins * Ensure only single instance for each classpath extension Signed-off-by: Craig Perkins * Add test for classpath plugin extended plugin loading Signed-off-by: Craig Perkins * Modify test to allow optional extended plugin Signed-off-by: Craig Perkins * Only optional extended plugins Signed-off-by: Craig Perkins * Add additional warning message Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Add tag to make extended plugin optional Signed-off-by: Craig Perkins * Only send plugin names when serializing PluginInfo Signed-off-by: Craig Perkins * Keep track of optional extended plugins in separate set Signed-off-by: Craig Perkins * Include in ser/de of PluginInfo Signed-off-by: Craig Perkins * Change to 3_0_0 Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins Change version in PluginInfo to V_2_19_0 after backport to 2.x merged (#16947) Signed-off-by: Craig Perkins Support object fields in star-tree index (#16728) --------- Signed-off-by: bharath-techie Bump ch.qos.logback:logback-core from 1.5.12 to 1.5.16 in /test/fixtures/hdfs-fixture (#16951) * Bump ch.qos.logback:logback-core in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-core](https://github.com/qos-ch/logback) from 1.5.12 to 1.5.16. - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.12...v_1.5.16) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-core dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] [Workload Management] Add Workload Management IT (#16359) * add workload management IT Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang --------- Signed-off-by: Ruirui Zhang Add new benchmark config for nested workload (#16956) Signed-off-by: Rishabh Singh Bump com.azure:azure-core-http-netty from 1.15.5 to 1.15.7 in /plugins/repository-azure (#16952) * Bump com.azure:azure-core-http-netty in /plugins/repository-azure Bumps [com.azure:azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.15.5 to 1.15.7. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.15.5...azure-core-http-netty_1.15.7) --- updated-dependencies: - dependency-name: com.azure:azure-core-http-netty dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Always use constant_score query for match_only_text (#16964) In some cases, when we create a term query over a `match_only_text` field, it may still try to compute scores, which prevents early termination. We should *always* use a constant score query when querying `match_only_text`, since we don't have the statistics required to compute scores.
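As a minimal sketch of the idea, using raw Lucene rather than the actual OpenSearch field type code (field and term values are illustrative): wrapping the term query in a `ConstantScoreQuery` gives every hit the same score, so collectors can terminate early instead of attempting to compute scores the field cannot support.

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public final class MatchOnlyTextScoringSketch {
    public static void main(String[] args) {
        // A scored term query would try to use term statistics that
        // match_only_text does not index.
        Query scored = new TermQuery(new Term("body", "opensearch"));
        // Constant-score wrapper: every hit scores the same, enabling
        // early termination.
        Query constant = new ConstantScoreQuery(scored);
        System.out.println(constant); // ConstantScore(body:opensearch)
    }
}
```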
--------- Signed-off-by: Michael Froh Changes to support unmapped fields in metric aggregation (#16481) Avoids an exception when querying an unmapped field when the star tree experimental feature is enabled. --------- Signed-off-by: expani Use async client for delete blob or path in S3 Blob Container (#16788) * Use async client for delete blob or path in S3 Blob Container Signed-off-by: Ashish Singh * Fix UTs Signed-off-by: Ashish Singh * Fix failures in S3BlobStoreRepositoryTests Signed-off-by: Ashish Singh * Fix S3BlobStoreRepositoryTests Signed-off-by: Ashish Singh * Fix failures in S3RepositoryThirdPartyTests Signed-off-by: Ashish Singh * Fix failures in S3RepositoryPluginTests Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh Fix Shallow copy snapshot failures on closed index (#16868) * Fix shallow v1 snapshot failures on closed index Signed-off-by: Shubh Sahu * UT fix Signed-off-by: Shubh Sahu * Adding UT Signed-off-by: Shubh Sahu * small fix Signed-off-by: Shubh Sahu * Addressing comments Signed-off-by: Shubh Sahu * Addressing comments Signed-off-by: Shubh Sahu * Modifying IT to restore snapshot Signed-off-by: Shubh Sahu --------- Signed-off-by: Shubh Sahu Co-authored-by: Shubh Sahu Add Response Status Number in http trace logs. (#16978) Signed-off-by: Rishikesh1159 support termQueryCaseInsensitive/termQuery can search from doc_value in flat_object/keyword field (#16974) Signed-off-by: kkewwei Signed-off-by: kkewwei use the correct type to widen the sort fields when merging top docs (#16881) * use the correct type to widen the sort fields when merging top docs Signed-off-by: panguixin * fix Signed-off-by: panguixin * apply comments Signed-off-by: panguixin * changelog Signed-off-by: panguixin * add more tests Signed-off-by: panguixin --------- Signed-off-by: panguixin Fix multi-value sort for unsigned long (#16732) * Fix multi-value sort for unsigned long Signed-off-by: panguixin * Add initial rest-api-spec tests Signed-off-by: Andriy Redko * add more rest tests Signed-off-by: panguixin * fix Signed-off-by: panguixin * fix Signed-off-by: panguixin * Extend MultiValueMode with dedicated support of unsigned_long doc values Signed-off-by: Andriy Redko * Add CHANGELOG.md, minor cleanups Signed-off-by: Andriy Redko * Correct the license headers Signed-off-by: Andriy Redko * Correct the @PublicApi version Signed-off-by: Andriy Redko * Replace SingletonSortedNumericUnsignedLongValues with LongToSortedNumericUnsignedLongValues (as per review comments) Signed-off-by: Andriy Redko --------- Signed-off-by: panguixin Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko Update Gradle to 8.12 (#16884) Signed-off-by: Andriy Redko `phone-search` analyzer: don't emit sip/tel prefix, int'l prefix, extension & unformatted input (#16993) * `phone-search` analyzer: don't emit int'l prefix this was an oversight in the initial implementation: if the tokenizer emits the international calling prefix in the search analyzer then all documents with the same international calling prefix will match. e.g. when searching for `+1-555-123-4567` not only documents with this number would match but also any other document with a `1` token (i.e. any other number with this prefix). thus the search functionality is currently broken for this analyzer, making it useless. the test coverage has now been extended to cover these and other use-cases. Signed-off-by: Ralph Ursprung * `phone-search` analyzer: don't emit extension & unformatted input if these tokens are emitted it meant that phone numbers with other international dialling prefixes still matched. e.g. searching for `+1 1234` would also match a number stored as `+2 1234`, which was wrong. the tokens still need to be emitted for the `phone` analyzer, e.g. when the user only enters the extension / local number it should still match, the same goes for the other ngrams: these are needed for search-as-you-type style queries where the user input needs to match against partial phone numbers. Signed-off-by: Ralph Ursprung * `phone-search` analyzer: don't emit sip/tel prefix in line with the previous two commits, this is something else the search analyzer shouldn't emit since otherwise searching for any number with such a prefix will match _any_ document with the same prefix. Signed-off-by: Ralph Ursprung --------- Signed-off-by: Ralph Ursprung Limit RW separation to remote store enabled clusters and update recovery flow (#16760) * Update search only replica recovery flow This PR includes multiple changes to search replica recovery. 1. Change search only replica copies to recover as empty store instead of PEER. This will run a store recovery that syncs segments from remote store directly and eliminate any primary communication. 2.
Remove search replicas from the in-sync allocation ID set and update routing table to exclude them from allAllocationIds. This ensures primaries aren't tracking or validating the routing table for any search replica's presence. 3. Change search replica validation to require remote store. There are versions of the above changes that are still possible with primary based node-node replication, but I don't think they are worth making at this time. Signed-off-by: Marc Handalian * more coverage Signed-off-by: Marc Handalian * add changelog entry Signed-off-by: Marc Handalian * add assertions that Search Replicas are not in the in-sync id set nor the AllAllocationIds set in the routing table Signed-off-by: Marc Handalian * update async task to only run if the FF is enabled and we are a remote store cluster. This check had previously only checked for segrep Signed-off-by: Marc Handalian * clean up max shards logic Signed-off-by: Marc Handalian * remove search replicas from check during renewPeerRecoveryRetentionLeases Signed-off-by: Marc Handalian * Revert "update async task to only run if the FF is enabled and we are a remote store cluster." reverting this, we already check for remote store earlier. This reverts commit 48ca1a3050d0f24757c70ae23a9d9e185cb3bc40. Signed-off-by: Marc Handalian * Add more tests for failover case Signed-off-by: Marc Handalian * Update remotestore restore logic and add test ensuring we can restore only writers when red Signed-off-by: Marc Handalian * Fix Search replicas to honor node level recovery limits Signed-off-by: Marc Handalian * Fix translog UUID mismatch on existing store recovery. This commit adds PR feedback and recovery tests post node restart. Signed-off-by: Marc Handalian * Fix spotless Signed-off-by: Marc Handalian * Fix bug with remote restore and add more tests Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian Fix case insensitive and escaped query on wildcard (#16827) * fix case insensitive and escaped query on wildcard Signed-off-by: gesong.samuel * add changelog Signed-off-by: gesong.samuel --------- Signed-off-by: gesong.samuel Signed-off-by: Michael Froh Co-authored-by: gesong.samuel Co-authored-by: Michael Froh Bump opentelemetry from 1.41.0 to 1.46.0 and opentelemetry-semconv from 1.27.0-alpha to 1.29.0-alpha (#17000) Signed-off-by: Andriy Redko TransportBulkAction.doRun() (#16950) Signed-off-by: kkewwei Signed-off-by: kkewwei Show only intersecting buckets to the Adjacency matrix aggregation (#11733) Signed-off-by: Ivan Brusic Bump com.google.re2j:re2j from 1.7 to 1.8 in /plugins/repository-hdfs (#17012) * Bump com.google.re2j:re2j from 1.7 to 1.8 in /plugins/repository-hdfs Bumps [com.google.re2j:re2j](https://github.com/google/re2j) from 1.7 to 1.8. - [Release notes](https://github.com/google/re2j/releases) - [Commits](https://github.com/google/re2j/compare/re2j-1.7...re2j-1.8) --- updated-dependencies: - dependency-name: com.google.re2j:re2j dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Bump com.nimbusds:oauth2-oidc-sdk from 11.20.1 to 11.21 in /plugins/repository-azure (#17010) * Bump com.nimbusds:oauth2-oidc-sdk in /plugins/repository-azure Bumps [com.nimbusds:oauth2-oidc-sdk](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions) from 11.20.1 to 11.21. - [Changelog](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/branches/compare/11.21..11.20.1) --- updated-dependencies: - dependency-name: com.nimbusds:oauth2-oidc-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> improve `PhoneNumberAnalyzerTests#testTelPrefixSearch` (#17016) this way we ensure that it doesn't include any additional tokens which we don't want. this is a follow-up to commit 4d943993ac9 / #16993. Signed-off-by: Ralph Ursprung Filter shards for sliced search at coordinator (#16771) * Filter shards for sliced search at coordinator Prior to this commit, a sliced search would fan out to every shard, then apply a MatchNoDocsQuery filter on shards that don't correspond to the current slice. This still creates a (useless) search context on each shard for every slice, though. For a long-running sliced scroll, this can quickly exhaust the number of available scroll contexts. This change avoids fanning out to all the shards by checking at the coordinator if a shard is matched by the current slice. This should reduce the number of open scroll contexts to max(numShards, numSlices) instead of numShards * numSlices.
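A minimal sketch of the coordinator-side check, assuming the default modulo assignment of slice IDs to shards (illustrative only; the real routing in SliceBuilder also handles custom slice fields and the case of more slices than shards):

```java
// Sketch: with numSlices <= numShards, slice i only needs to visit shards
// where shardId % numSlices == i, so the remaining shards can be skipped at
// the coordinator instead of opening a context that matches no documents.
public final class SliceShardFilterSketch {
    static boolean shardMatchesSlice(int shardId, int sliceId, int numSlices) {
        return shardId % numSlices == sliceId;
    }

    public static void main(String[] args) {
        int numShards = 6, numSlices = 3;
        for (int slice = 0; slice < numSlices; slice++) {
            System.out.print("slice " + slice + " -> shards:");
            for (int shard = 0; shard < numShards; shard++) {
                if (shardMatchesSlice(shard, slice, numSlices)) {
                    System.out.print(" " + shard);
                }
            }
            System.out.println();
        }
    }
}
```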
--------- Signed-off-by: Michael Froh Upgrade HttpCore5/HttpClient5 to support ExtendedSocketOption in HttpAsyncClient (#16757) * upgrade httpcore5/httpclient5 to support ExtendedSocketOption in HttpAsyncClient Signed-off-by: kkewwei Signed-off-by: kkewwei * Use the Upgrade flow by default Signed-off-by: Andriy Redko * Update Reactor Netty to 1.1.26.Final Signed-off-by: Andriy Redko * Add SETTING_H2C_MAX_CONTENT_LENGTH to configure h2cMaxContentLength for reactor-netty4 transport Signed-off-by: Andriy Redko * Update Apache HttpCore5 to 5.3.2 Signed-off-by: Andriy Redko --------- Signed-off-by: kkewwei Signed-off-by: kkewwei Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko Update version checks for backport (#17030) Signed-off-by: Michael Froh Signed-off-by: Andriy Redko Co-authored-by: Michael Froh Fix versions and breaking API changes (#17031) Signed-off-by: Andriy Redko Bump com.nimbusds:nimbus-jose-jwt from 9.47 to 10.0.1 in /test/fixtures/hdfs-fixture (#17011) * Bump com.nimbusds:nimbus-jose-jwt in /test/fixtures/hdfs-fixture Bumps [com.nimbusds:nimbus-jose-jwt](https://bitbucket.org/connect2id/nimbus-jose-jwt) from 9.47 to 10.0.1. - [Changelog](https://bitbucket.org/connect2id/nimbus-jose-jwt/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/nimbus-jose-jwt/branches/compare/10.0.1..9.47) --- updated-dependencies: - dependency-name: com.nimbusds:nimbus-jose-jwt dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Remove user data from logs when not in debug/trace mode (#17007) * Remove user data from logs when not in debug/trace mode Signed-off-by: Mohit Godwani Signed-off-by: meetvm [Bugfix] Fix cache maximum size settings not working properly with pluggable caching (#16636) * Fix cache size setting Signed-off-by: Peter Alfonsi * Changelog Signed-off-by: Peter Alfonsi * Deprecate original IRC size setting Signed-off-by: Peter Alfonsi * spotlessApply Signed-off-by: Peter Alfonsi * Addressed Ankit's comments Signed-off-by: Peter Alfonsi * Address Sagar's comment Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Signed-off-by: Ankit Jain Co-authored-by: Peter Alfonsi Co-authored-by: Ankit Jain Signed-off-by: meetvm Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology (#17037) * [GRPC] Rename AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology Signed-off-by: Karen Xu * Update CHANGELOG Signed-off-by: Karen Xu * remove extra space in CHANGELOG Signed-off-by: Karen Xu * Update plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java Co-authored-by: Andriy Redko Signed-off-by: Karen X --------- Signed-off-by: Karen Xu Signed-off-by: Karen X Co-authored-by: Andriy Redko Signed-off-by: meetvm Fix getTime field name to time in GetStats (#16894) (#17009) * Fix getTime field name to time in GetStats (#16894) Signed-off-by: hye-on * Update PR number in changelog Signed-off-by: hye-on * Deprecate getTime field and add time field in GetStats for backward compatibility Signed-off-by: hye-on * Add forRemoval flag to getTime field for future removal Signed-off-by: hye-on * Changed to use field instead of humanReadableField for GET_TIME in JSON response Replaced the use of builder.humanReadableField for the GET_TIME field with builder.field(Fields.GET_TIME, Objects.toString(getTime())). This prevents the duplication of the time_in_millis field. Signed-off-by: hye-on * Add test to validate getTime and time fields in _stats API response getTime and time fields are verified to be included in the _stats API response and correctly aligned.
Signed-off-by: hye-on * Fix formatting in GetStats.java Signed-off-by: hye-on * Rename test file to better reflect test purpose Signed-off-by: hye-on * Test Add skip version for stats API human filter test under 2.19.99 Signed-off-by: hye-on * Remove unnecessary changelog entries Signed-off-by: hye-on * Add a line for styling purposes Signed-off-by: hye-on --------- Signed-off-by: hye-on Add Craig Perkins as OpenSearch Maintainer (#17046) Signed-off-by: Daniel Widdis Update CODEOWNERS (#17053) Signed-off-by: Daniel Widdis Improve exception handling in S3BlobContainer synchronous operations (#17049) Signed-off-by: Ashish Singh --- .github/CODEOWNERS | 20 +-- CHANGELOG.md | 4 + MAINTAINERS.md | 1 + .../common/tier/TieredSpilloverCache.java | 15 ++ .../tier/TieredSpilloverCacheSettings.java | 6 + .../cache/common/tier/MockDiskCache.java | 4 + .../tier/TieredSpilloverCacheTests.java | 135 +++++++++++++++++- .../50_wait_for_active_shards.yml | 2 +- .../reindex/60_wait_for_active_shards.yml | 2 +- .../test/update_by_query/50_consistency.yml | 2 +- .../cache/EhcacheDiskCacheSettings.java | 1 + .../cache/store/disk/EhcacheDiskCache.java | 5 + .../store/disk/EhCacheDiskCacheTests.java | 61 ++++++++ .../repositories/s3/S3BlobContainer.java | 23 ++- .../s3/S3BlobStoreContainerTests.java | 110 ++++++++++++++ .../opensearch/transport/grpc/GrpcPlugin.java | 4 +- .../grpc/Netty4GrpcServerTransport.java | 17 +-- .../grpc/Netty4GrpcServerTransportTests.java | 2 +- ...include_both_time_and_gettime_in_stats.yml | 36 +++++ .../support/WaitActiveShardCountIT.java | 4 +- .../replication/ReplicationOperation.java | 10 +- .../TransportReplicationAction.java | 2 +- .../action/update/UpdateHelper.java | 7 +- .../org/opensearch/bootstrap/Security.java | 6 +- .../common/cache/service/CacheService.java | 22 ++- .../cache/store/OpenSearchOnHeapCache.java | 14 +- .../OpenSearchOnHeapCacheSettings.java | 1 + .../org/opensearch/index/get/GetStats.java | 9 +- .../transfer/TranslogTransferManager.java | 3 +- .../indices/IndicesRequestCache.java | 74 ++++++---- .../org/opensearch/plugins/NetworkPlugin.java | 4 +- .../store/OpenSearchOnHeapCacheTests.java | 75 +++++++--- .../settings/MemorySizeSettingsTests.java | 3 + .../TranslogTransferManagerTests.java | 74 ++++++++++ .../indices/IndicesRequestCacheTests.java | 48 +++++++ 35 files changed, 696 insertions(+), 110 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 18a310862dfbb..38ce0c3a3f927 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,27 +11,27 @@ # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file. 
# Default ownership for all repo files -* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/transport-netty4/ @peternied -/plugins/identity-shiro/ @peternied +/plugins/identity-shiro/ @peternied @cwperks -/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /server/src/main/java/org/opensearch/extensions/ @peternied -/server/src/main/java/org/opensearch/identity/ @peternied -/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/identity/ @peternied @cwperks +/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli 
@shwetathareja @sohami @VachaShah /server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied /server/src/main/java/org/opensearch/transport/ @peternied -/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah -/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /.github/ @jed326 @peternied -/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah diff --git a/CHANGELOG.md b/CHANGELOG.md index 241d88049214d..c9d7d9a60a3e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/)) - Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/)) - Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/)) +- Added a new `time` field to replace the deprecated `getTime` field in `GetStats`. ([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) @@ -75,6 +76,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712)) +- Marked `getTime` field as deprecated in favor of the new `time` field. 
([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009)) ### Removed @@ -95,10 +97,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) +- Fix max request cache size settings not working properly with pluggable caching ([#16636](https://github.com/opensearch-project/OpenSearch/pull/16636)) - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) +- Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) ### Security diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 4a8aa9305df74..93821a3da4c71 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -13,6 +13,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon | | Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon | | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | +| Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon | | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | | Gao Binlong | [gaobinlong](https://github.com/gaobinlong) | Amazon | diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 38a6915ffd10e..9879235812377 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -150,6 +150,9 @@ static class TieredSpilloverCacheSegment implements ICache { private final TieredSpilloverCacheStatsHolder statsHolder; + private final long onHeapCacheMaxWeight; + private final long diskCacheMaxWeight; + /** * This map is used to handle concurrent requests for same key in computeIfAbsent() to ensure we load the value * only once. 
@@ -218,6 +221,8 @@ static class TieredSpilloverCacheSegment implements ICache { cacheListMap.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK)); this.caches = Collections.synchronizedMap(cacheListMap); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list + this.onHeapCacheMaxWeight = onHeapCacheSizeInBytes; + this.diskCacheMaxWeight = diskCacheSizeInBytes; } // Package private for testing @@ -526,6 +531,16 @@ void updateStatsOnPut(String destinationTierValue, ICacheKey key, V value) { statsHolder.incrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); } + // pkg-private for testing + long getOnHeapCacheMaxWeight() { + return onHeapCacheMaxWeight; + } + + // pkg-private for testing + long getDiskCacheMaxWeight() { + return diskCacheMaxWeight; + } + /** * A class which receives removal events from the heap tier. */ diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java index 122d00af3bd1e..31dc1795134e4 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -85,6 +85,9 @@ public class TieredSpilloverCacheSettings { /** * Setting which defines the onHeap cache size to be used within tiered cache. + * This setting overrides size settings from the heap tier implementation. + * For example, if OpenSearchOnHeapCache is the heap tier in the request cache, and + * indices.requests.cache.opensearch_onheap.size is set, that value will be ignored in favor of this setting. * * Pattern: {cache_type}.tiered_spillover.onheap.store.size * Example: indices.request.cache.tiered_spillover.onheap.store.size @@ -96,6 +99,9 @@ public class TieredSpilloverCacheSettings { /** * Setting which defines the disk cache size to be used within tiered cache. + * This setting overrides the size setting from the disk tier implementation. + * For example, if EhcacheDiskCache is the disk tier in the request cache, and + * indices.requests.cache.ehcache_disk.max_size_in_bytes is set, that value will be ignored in favor of this setting. 
*/ public static final Setting.AffixSetting TIERED_SPILLOVER_DISK_STORE_SIZE = Setting.suffixKeySetting( TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.size", diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index fcddd489a27aa..78302cede402f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -128,6 +128,10 @@ public void close() { } + long getMaximumWeight() { + return maxSize; + } + public static class MockDiskCacheFactory implements Factory { public static final String NAME = "mockDiskCache"; diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 3bb1321f9faf2..494534ac74c9f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -58,6 +58,7 @@ import static org.opensearch.cache.common.tier.TieredSpilloverCache.ZERO_SEGMENT_COUNT_EXCEPTION_MESSAGE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.MIN_DISK_CACHE_SIZE_IN_BYTES; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; @@ -2166,6 +2167,134 @@ public void testDropStatsForDimensions() throws Exception { assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.stats().getTotalStats()); } + public void testSegmentSizesWhenUsingFactory() { + // The TSC's tier size settings, TIERED_SPILLOVER_ONHEAP_STORE_SIZE and TIERED_SPILLOVER_DISK_STORE_SIZE, + // should always be respected, overriding the individual implementation's size settings if present + long expectedHeapSize = 256L * between(10, 20); + long expectedDiskSize = MIN_DISK_CACHE_SIZE_IN_BYTES + 256L * between(30, 40); + long heapSizeFromImplSetting = 50; + int diskSizeFromImplSetting = 50; + int numSegments = getNumberOfSegments(); + + int keyValueSize = 1; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + Settings settings = Settings.builder() + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockDiskCache.MockDiskCacheFactory.NAME + ) + // These two size settings should be honored + .put( + 
TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + expectedHeapSize + "b" + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + expectedDiskSize + ) + // The size setting from the OpenSearchOnHeap implementation should not be honored + .put( + OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + heapSizeFromImplSetting + "b" + ) + .put(FeatureFlags.PLUGGABLE_CACHE, "true") + .put( + TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), + numSegments + ) + .build(); + String storagePath = getStoragePath(settings); + + TieredSpilloverCache tieredSpilloverCache = (TieredSpilloverCache< + String, + String>) new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setSettings(settings) + .setDimensionNames(dimensionNames) + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken + // 20_000_000 ns = 20 ms to compute + .setClusterSettings(clusterSettings) + .setStoragePath(storagePath) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockDiskCache.MockDiskCacheFactory.NAME, + // The size value passed in here acts as the "implementation setting" for the disk tier, and should also be ignored + new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, false, keyValueSize) + ) + ); + checkSegmentSizes(tieredSpilloverCache, expectedHeapSize, expectedDiskSize); + } + + public void testSegmentSizesWhenNotUsingFactory() { + long expectedHeapSize = 256L * between(10, 20); + long expectedDiskSize = MIN_DISK_CACHE_SIZE_IN_BYTES + 256L * between(30, 40); + int heapSizeFromImplSetting = 50; + int diskSizeFromImplSetting = 50; + + Settings settings = Settings.builder() + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put(FeatureFlags.PLUGGABLE_CACHE, "true") + // The size setting from the OpenSearchOnHeapCache implementation should not be honored + .put( + OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + heapSizeFromImplSetting + "b" + ) + .build(); + + int keyValueSize = 1; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + int numSegments = getNumberOfSegments(); + CacheConfig cacheConfig = getCacheConfig(1, settings, removalListener, numSegments); + TieredSpilloverCache tieredSpilloverCache = getTieredSpilloverCache( + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, true, keyValueSize), + cacheConfig, + null, + removalListener, + numSegments, + expectedHeapSize, + expectedDiskSize + ); + 
checkSegmentSizes(tieredSpilloverCache, expectedHeapSize, expectedDiskSize); + } + + private void checkSegmentSizes(TieredSpilloverCache cache, long expectedHeapSize, long expectedDiskSize) { + TieredSpilloverCache.TieredSpilloverCacheSegment segment = cache.tieredSpilloverCacheSegments[0]; + assertEquals(expectedHeapSize / cache.getNumberOfSegments(), segment.getOnHeapCacheMaxWeight()); + assertEquals(expectedDiskSize / cache.getNumberOfSegments(), segment.getDiskCacheMaxWeight()); + } + private List getMockDimensions() { List dims = new ArrayList<>(); for (String dimensionName : dimensionNames) { @@ -2455,9 +2584,9 @@ private void verifyComputeIfAbsentThrowsException( MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); Settings settings = Settings.builder() .put( - OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) - .get(MAXIMUM_SIZE_IN_BYTES_KEY) - .getKey(), + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), onHeapCacheSize * keyValueSize + "b" ) .build(); diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml index ea8ed4df3e748..39cf36847f25d 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml @@ -25,7 +25,7 @@ match_all: {} - match: - failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.+/ + failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\]/ - do: indices.refresh: {} diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml index 3498e555d2879..a580c55a95130 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml @@ -25,7 +25,7 @@ dest: index: dest - match: - failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)\..Timeout\:.\[1s\],.request:.+/ + failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)\..Timeout\:.\[1s\]/ - do: reindex: diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml index 4a067580b54d3..e97eacc3c9c25 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml @@ -21,7 +21,7 @@ wait_for_active_shards: 4 timeout: 1s - match: - failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.+/ + failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\]/ - do: 
update_by_query: diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java index cbc104f2d0b00..e4c9dd1e96c3c 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java @@ -101,6 +101,7 @@ public class EhcacheDiskCacheSettings { /** * Disk cache max size setting. + * If this cache is used as a tier in a TieredSpilloverCache, this setting is ignored. */ public static final Setting.AffixSetting DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting( EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes", diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 0fa0f8162bb98..33c27eb301ad1 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -680,6 +680,11 @@ private V deserializeValue(ByteArrayWrapper binary) { return valueSerializer.deserialize(binary.value); } + // Pkg-private for testing. + long getMaximumWeight() { + return maxWeightInBytes; + } + /** * Factory to create an ehcache disk cache. */ diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index a0d0aa4ec4914..4e879af052c15 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -20,11 +20,13 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.serializer.BytesReferenceSerializer; import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -1201,6 +1203,65 @@ public void testEhcacheCloseWithDestroyCacheMethodThrowingException() throws Exc ehcacheDiskCache.close(); } + public void testWithCacheConfigSizeSettings() throws Exception { + // The cache should get its size from the config if present, and otherwise should get it from the setting. 
+ long maxSizeFromSetting = between(MINIMUM_MAX_SIZE_IN_BYTES + 1000, MINIMUM_MAX_SIZE_IN_BYTES + 2000); + long maxSizeFromConfig = between(MINIMUM_MAX_SIZE_IN_BYTES + 3000, MINIMUM_MAX_SIZE_IN_BYTES + 4000); + + EhcacheDiskCache cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, false); + assertEquals(maxSizeFromSetting, cache.getMaximumWeight()); + + cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, true); + assertEquals(maxSizeFromConfig, cache.getMaximumWeight()); + } + + // Modified from OpenSearchOnHeapCacheTests. Can't reuse, as we can't add a dependency on the server.test module. + private EhcacheDiskCache setupMaxSizeTest(long maxSizeFromSetting, long maxSizeFromConfig, boolean putSizeInConfig) + throws Exception { + MockRemovalListener listener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { + Settings settings = Settings.builder() + .put(FeatureFlags.PLUGGABLE_CACHE, true) + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_MAX_SIZE_IN_BYTES_KEY) + .getKey(), + maxSizeFromSetting + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_STORAGE_PATH_KEY) + .getKey(), + env.nodePaths()[0].indicesPath.toString() + "/request_cache/" + 0 + ) + .build(); + + CacheConfig.Builder cacheConfigBuilder = new CacheConfig.Builder().setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setWeigher(getWeigher()) + .setRemovalListener(listener) + .setSettings(settings) + .setDimensionNames(List.of(dimensionName)) + .setStatsTrackingEnabled(true); + if (putSizeInConfig) { + cacheConfigBuilder.setMaxSizeInBytes(maxSizeFromConfig); + } + + ICache.Factory cacheFactory = new EhcacheDiskCache.EhcacheDiskCacheFactory(); + return (EhcacheDiskCache) cacheFactory.create( + cacheConfigBuilder.build(), + CacheType.INDICES_REQUEST_CACHE, + null + ); + } + } + static class MockEhcahceDiskCache extends EhcacheDiskCache { public MockEhcahceDiskCache(Builder builder) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 8690a5c91680a..d5cf201b171bb 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -99,6 +99,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.stream.Collectors; @@ -373,17 +374,31 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS } @Override - public DeleteResult delete() { + public DeleteResult delete() throws IOException { PlainActionFuture future = new PlainActionFuture<>(); deleteAsync(future); - return future.actionGet(); + return getFutureValue(future); } @Override - public void deleteBlobsIgnoringIfNotExists(List blobNames) { + public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { 
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index 8690a5c91680a..d5cf201b171bb 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -99,6 +99,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -373,17 +374,31 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS
     }
 
     @Override
-    public DeleteResult delete() {
+    public DeleteResult delete() throws IOException {
         PlainActionFuture<DeleteResult> future = new PlainActionFuture<>();
         deleteAsync(future);
-        return future.actionGet();
+        return getFutureValue(future);
     }
 
     @Override
-    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) {
+    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
         PlainActionFuture<Void> future = new PlainActionFuture<>();
         deleteBlobsAsyncIgnoringIfNotExists(blobNames, future);
-        future.actionGet();
+        getFutureValue(future);
+    }
+
+    private <T> T getFutureValue(PlainActionFuture<T> future) throws IOException {
+        try {
+            return future.get();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new IllegalStateException("Future got interrupted", e);
+        } catch (ExecutionException e) {
+            if (e.getCause() instanceof IOException) {
+                throw (IOException) e.getCause();
+            }
+            throw new RuntimeException(e.getCause());
+        }
     }
 
     @Override
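The getFutureValue helper restores the blocking API's contract over the async client: interrupts are re-flagged on the thread, and an IOException cause is rethrown as-is rather than wrapped. A self-contained sketch of the same unwrap pattern (the helper name unwrap is hypothetical, not the actual S3 code):

    import java.io.IOException;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;

    // Sketch of the unwrap pattern used above.
    static <T> T unwrap(Future<T> future) throws IOException {
        try {
            return future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt for callers
            throw new IllegalStateException("Future got interrupted", e);
        } catch (ExecutionException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause(); // surface the original I/O failure
            }
            throw new RuntimeException(e.getCause());
        }
    }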
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
index 53371cd1529ce..d3725642760dc 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
@@ -1947,6 +1947,116 @@ public void onFailure(Exception e) {
         assertEquals(simulatedFailure, exceptionRef.get().getCause());
     }
 
+    public void testDeleteWithInterruptedException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        // Mock the list operation to interrupt the calling thread
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        doAnswer(invocation -> {
+            Thread.currentThread().interrupt();
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        IllegalStateException e = expectThrows(IllegalStateException.class, blobContainer::delete);
+        assertEquals("Future got interrupted", e.getMessage());
+        assertTrue(Thread.interrupted()); // Clear interrupted state
+    }
+
+    public void testDeleteWithExecutionException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        RuntimeException simulatedError = new RuntimeException("Simulated error");
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        doAnswer(invocation -> {
+            Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0);
+            subscriber.onError(simulatedError);
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        IOException e = expectThrows(IOException.class, blobContainer::delete);
+        assertEquals("Failed to list objects for deletion", e.getMessage());
+        assertEquals(simulatedError, e.getCause());
+    }
+
+    public void testDeleteBlobsIgnoringIfNotExistsWithInterruptedException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.getBulkDeletesSize()).thenReturn(5);
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        // Mock deleteObjects to interrupt the calling thread
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenAnswer(invocation -> {
+            Thread.currentThread().interrupt();
+            return null;
+        });
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        List<String> blobNames = Arrays.asList("test1", "test2");
+
+        IllegalStateException e = expectThrows(IllegalStateException.class, () -> blobContainer.deleteBlobsIgnoringIfNotExists(blobNames));
+        assertEquals("Future got interrupted", e.getMessage());
+        assertTrue(Thread.interrupted()); // Clear interrupted state
+    }
+
+    public void testDeleteBlobsIgnoringIfNotExistsWithExecutionException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.getBulkDeletesSize()).thenReturn(5);
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        RuntimeException simulatedError = new RuntimeException("Simulated delete error");
+        CompletableFuture<DeleteObjectsResponse> failedFuture = new CompletableFuture<>();
+        failedFuture.completeExceptionally(simulatedError);
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        List<String> blobNames = Arrays.asList("test1", "test2");
+
+        IOException e = expectThrows(IOException.class, () -> blobContainer.deleteBlobsIgnoringIfNotExists(blobNames));
+        assertEquals("Failed to delete blobs " + blobNames, e.getMessage());
+        assertEquals(simulatedError, e.getCause().getCause());
+    }
+
     private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) {
         final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize));
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
index 0a464e135350b..7f02983010f98 100644
--- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
@@ -25,7 +25,7 @@
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST;
-import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT;
@@ -58,7 +58,7 @@ public Map> getAuxTransports(
 
     @Override
     public List<Setting<?>> getSettings() {
         return List.of(
-            SETTING_GRPC_PORTS,
+            SETTING_GRPC_PORT,
             SETTING_GRPC_HOST,
             SETTING_GRPC_PUBLISH_HOST,
             SETTING_GRPC_BIND_HOST,
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
index 61c0722772b92..1fb6a0bca03ea 100644
--- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
@@ -63,9 +63,9 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
     /**
      * Port range on which to bind.
-     * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORTS where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
+     * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORT where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
     */
-    public static final Setting<PortsRange> SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(
+    public static final Setting<PortsRange> SETTING_GRPC_PORT = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(
         GRPC_TRANSPORT_SETTING_KEY
     );
@@ -134,20 +134,21 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
      * @param networkService the bind/publish addresses.
      */
     public Netty4GrpcServerTransport(Settings settings, List<BindableService> services, NetworkService networkService) {
+        logger.debug("Initializing Netty4GrpcServerTransport with settings = {}", settings);
         this.settings = Objects.requireNonNull(settings);
         this.services = Objects.requireNonNull(services);
         this.networkService = Objects.requireNonNull(networkService);
 
-        final List<String> httpBindHost = SETTING_GRPC_BIND_HOST.get(settings);
-        this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray(
+        final List<String> grpcBindHost = SETTING_GRPC_BIND_HOST.get(settings);
+        this.bindHosts = (grpcBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : grpcBindHost).toArray(
             Strings.EMPTY_ARRAY
         );
 
-        final List<String> httpPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
-        this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
+        final List<String> grpcPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
+        this.publishHosts = (grpcPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : grpcPublishHost)
             .toArray(Strings.EMPTY_ARRAY);
 
-        this.port = SETTING_GRPC_PORTS.get(settings);
+        this.port = SETTING_GRPC_PORT.get(settings);
         this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings);
     }
@@ -229,7 +230,7 @@ private void bindServer() {
                     + publishInetAddress
                     + "). "
                     + "Please specify a unique port by setting "
-                    + SETTING_GRPC_PORTS.getKey()
+                    + SETTING_GRPC_PORT.getKey()
                     + " or "
                     + SETTING_GRPC_PUBLISH_PORT.getKey()
             );
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
index ebeff62c2c23c..8cf44eebb293e 100644
--- a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
@@ -44,6 +44,6 @@ public void test() {
     }
 
     private static Settings createSettings() {
-        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build();
+        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), getPortRange()).build();
     }
 }
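With the rename from "ports" to "port", enabling the gRPC aux transport on a node looks roughly like this (a sketch; GRPC_TRANSPORT_SETTING_KEY and AUX_TRANSPORT_TYPES_KEY are constants this patch touches, and the port range is illustrative):

    // Sketch: enable the gRPC aux transport and bind it to a port range.
    // The per-type key is now "aux.transport.<type>.port" (singular), not ".ports".
    Settings settings = Settings.builder()
        .putList(NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY, GRPC_TRANSPORT_SETTING_KEY)
        .put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), "9400-9500")
        .build();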
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml
new file mode 100644
index 0000000000000..d5e3e7554b400
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml
@@ -0,0 +1,36 @@
+---
+setup:
+  - do:
+      indices.create:
+        index: test1
+        body:
+          settings:
+            number_of_shards: 1
+            number_of_replicas: 0
+        wait_for_active_shards: all
+
+  - do:
+      index:
+        index: test1
+        id: 1
+        body: { "foo": "bar" }
+
+  - do:
+      indices.refresh:
+        index: test1
+
+---
+"Test _stats API includes both time and getTime metrics with human filter":
+  - skip:
+      version: " - 2.19.99"
+      reason: "this change is added in 3.0.0"
+
+  - do:
+      indices.stats:
+        metric: [ get ]
+        human: true
+
+  - is_true: _all.primaries.get.time
+  - is_true: _all.primaries.get.getTime
+  - match: { _all.primaries.get.time: "0s" }
+  - match: { _all.primaries.get.getTime: "0s" }
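The two assertions on time and getTime pass because the server now emits both field names while the old spelling is deprecated; on the emitting side this boils down to two writes per response (a condensed sketch of the GetStats change further down in this patch):

    // Condensed sketch: both spellings are written during the deprecation window.
    builder.field("getTime", Objects.toString(getTime()));            // deprecated spelling
    builder.humanReadableField("time_in_millis", "time", getTime());  // canonical field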
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java
index 08cffac8aac5d..c4ffbccf0ab99 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java
@@ -76,7 +76,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception {
             assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
             assertThat(
                 e.getMessage(),
-                startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms], request:")
+                startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms]")
             );
             // but really, all is well
         }
@@ -120,7 +120,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception {
                 startsWith(
                     "[test][0] Not enough active copies to meet shard count of ["
                         + ActiveShardCount.ALL
-                        + "] (have 2, needed 3). Timeout: [100ms], request:"
+                        + "] (have 2, needed 3). Timeout: [100ms]"
                 )
             );
             // but really, all is well
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
index 9f69d41d83f5b..12d3502184ac4 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
@@ -141,15 +141,7 @@ public void execute() throws Exception {
         final ShardRouting primaryRouting = primary.routingEntry();
         final ShardId primaryId = primaryRouting.shardId();
         if (activeShardCountFailure != null) {
-            finishAsFailed(
-                new UnavailableShardsException(
-                    primaryId,
-                    "{} Timeout: [{}], request: [{}]",
-                    activeShardCountFailure,
-                    request.timeout(),
-                    request
-                )
-            );
+            finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}]", activeShardCountFailure, request.timeout()));
             return;
         }
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
index 49a96603f6802..637a7a31d78cc 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
@@ -1246,7 +1246,7 @@ void finishOnSuccess(Response response) {
         }
 
         void retryBecauseUnavailable(ShardId shardId, String message) {
-            retry(new UnavailableShardsException(shardId, "{} Timeout: [{}], request: [{}]", message, request.timeout(), request));
+            retry(new UnavailableShardsException(shardId, "{} Timeout: [{}]", message, request.timeout()));
         }
     }
diff --git a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java
index 19c32f9336df8..c02ec1fbb9cf0 100644
--- a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java
+++ b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java
@@ -58,6 +58,7 @@
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptService;
+import org.opensearch.script.ScriptType;
 import org.opensearch.script.UpdateScript;
 import org.opensearch.search.lookup.SourceLookup;
@@ -128,7 +129,11 @@ Tuple> executeScriptedUpsert(Map portsRanges = new HashSet<>();
         for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) {
-            Setting auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType);
+            Setting auxTypePortSettings = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(auxType);
             if (auxTypePortSettings.exists(settings)) {
                 portsRanges.add(auxTypePortSettings.get(settings));
             } else {
diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
index 01da78ecec52e..da006264094d2 100644
--- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
+++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
@@ -46,11 +46,8 @@ public CacheService(Map cacheStoreTypeFactories, Setting
     }
 
     public ICache createCache(CacheConfig config, CacheType cacheType) {
-        Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(
-            cacheType.getSettingPrefix()
-        );
-        String storeName = cacheSettingForCacheType.get(settings);
-        if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) {
+        String storeName = getStoreNameFromSetting(cacheType, settings);
+        if (!pluggableCachingEnabled(cacheType, settings)) {
             // Condition 1: In case feature flag is off, we default to onHeap.
             // Condition 2: In case storeName is not explicitly mentioned, we assume user is looking to use older
             // settings, so we again fallback to onHeap to maintain backward compatibility.
@@ -74,4 +71,19 @@ public NodeCacheStats stats(CommonStatsFlags flags) {
         }
         return new NodeCacheStats(statsMap, flags);
     }
+
+    /**
+     * Check if pluggable caching is on, and if a store type is present for this cache type.
+     */
+    public static boolean pluggableCachingEnabled(CacheType cacheType, Settings settings) {
+        String storeName = getStoreNameFromSetting(cacheType, settings);
+        return FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) && storeName != null && !storeName.isBlank();
+    }
+
+    private static String getStoreNameFromSetting(CacheType cacheType, Settings settings) {
+        Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(
+            cacheType.getSettingPrefix()
+        );
+        return cacheSettingForCacheType.get(settings);
+    }
 }
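pluggableCachingEnabled centralizes the two conditions (feature flag set, store name non-blank) that call sites previously inlined. A typical call site then reads (sketch):

    // Sketch: fall back to legacy on-heap behavior unless pluggable caching is fully configured.
    if (!CacheService.pluggableCachingEnabled(CacheType.INDICES_REQUEST_CACHE, settings)) {
        // legacy path: size the cache from the config, for backward compatibility
    }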
diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
index 571383a9fce6a..e1039c5d9ee55 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
@@ -17,6 +17,7 @@
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.RemovalNotification;
 import org.opensearch.common.cache.RemovalReason;
+import org.opensearch.common.cache.service.CacheService;
 import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.CacheStatsHolder;
 import org.opensearch.common.cache.stats.DefaultCacheStatsHolder;
@@ -80,7 +81,7 @@ public OpenSearchOnHeapCache(Builder builder) {
         this.weigher = builder.getWeigher();
     }
 
-    // package private for testing
+    // pkg-private for testing
     long getMaximumWeight() {
         return this.maximumWeight;
     }
@@ -192,8 +193,12 @@ public ICache create(CacheConfig config, CacheType cacheType,
             );
             long maxSizeInBytes = ((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes();
 
-            if (config.getMaxSizeInBytes() > 0) { // If this is passed from upstream(like tieredCache), then use this
-                // instead.
+            if (config.getMaxSizeInBytes() > 0) {
+                /*
+                Use the cache config value if present.
+                This can be passed down from the TieredSpilloverCache when creating individual segments,
+                but is not passed in from the IRC if pluggable caching is on.
+                */
                 builder.setMaximumWeightInBytes(config.getMaxSizeInBytes());
             } else {
                 builder.setMaximumWeightInBytes(maxSizeInBytes);
@@ -204,8 +209,7 @@
                 builder.setNumberOfSegments(-1); // By default it will use 256 segments.
             }
 
-            String storeName = cacheSettingForCacheType.get(settings);
-            if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) {
+            if (!CacheService.pluggableCachingEnabled(cacheType, settings)) {
                 // For backward compatibility as the user intent is to use older settings.
                 builder.setMaximumWeightInBytes(config.getMaxSizeInBytes());
                 builder.setExpireAfterAccess(config.getExpireAfterAccess());
diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
index 5a2964ad011bf..8ba356f9e0597 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
@@ -26,6 +26,7 @@ public class OpenSearchOnHeapCacheSettings {
     /**
      * Setting to define maximum size for the cache as a percentage of heap memory available.
+     * If this cache is used as a tier in a TieredSpilloverCache, this setting is ignored.
      *
      * Setting pattern: {cache_type}.opensearch_onheap.size
      */
diff --git a/server/src/main/java/org/opensearch/index/get/GetStats.java b/server/src/main/java/org/opensearch/index/get/GetStats.java
index a366014fe228e..55f14294d774b 100644
--- a/server/src/main/java/org/opensearch/index/get/GetStats.java
+++ b/server/src/main/java/org/opensearch/index/get/GetStats.java
@@ -41,6 +41,7 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 
 import java.io.IOException;
+import java.util.Objects;
 
 /**
  * Stats for a search get
@@ -137,6 +138,7 @@ public long current() {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(Fields.GET);
         builder.field(Fields.TOTAL, getCount());
+        builder.field(Fields.GET_TIME, Objects.toString(getTime()));
         builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, getTime());
         builder.field(Fields.EXISTS_TOTAL, existsCount);
         builder.humanReadableField(Fields.EXISTS_TIME_IN_MILLIS, Fields.EXISTS_TIME, getExistsTime());
@@ -155,7 +157,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     static final class Fields {
         static final String GET = "get";
         static final String TOTAL = "total";
-        static final String TIME = "getTime";
+        /**
+         * Deprecated field name for time. Use {@link #TIME} instead.
+         */
+        @Deprecated(forRemoval = true)
+        static final String GET_TIME = "getTime";
+        static final String TIME = "time";
         static final String TIME_IN_MILLIS = "time_in_millis";
         static final String EXISTS_TOTAL = "exists_total";
         static final String EXISTS_TIME = "exists_time";
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index 924669d0e46a9..1e621d6cb7688 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -206,7 +206,8 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
         } catch (Exception ex) {
             logger.error(() -> new ParameterizedMessage("Transfer failed for snapshot {}", transferSnapshot), ex);
             captureStatsOnUploadFailure();
-            translogTransferListener.onUploadFailed(transferSnapshot, ex);
+            Exception exWithoutSuppressed = new TranslogUploadFailedException(ex.getMessage());
+            translogTransferListener.onUploadFailed(transferSnapshot, exWithoutSuppressed);
             return false;
         }
     }
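The rewrap above is the stack-trace hiding referred to in the commit subject: the listener now receives a fresh TranslogUploadFailedException carrying only the message, so neither the original cause chain nor its suppressed exceptions propagate. The effect, sketched:

    // Sketch: rewrapping drops both the cause chain and any suppressed exceptions.
    Exception original = new RuntimeException("upload failed");
    original.addSuppressed(new IllegalStateException("noisy suppressed detail"));

    Exception rewrapped = new TranslogUploadFailedException(original.getMessage());
    assert rewrapped.getSuppressed().length == 0; // nothing carried over
    assert rewrapped.getCause() == null;          // no stack-trace chain either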
diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
index 3d158cb60a208..4f42cd8fe8672 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
@@ -124,10 +124,18 @@ public final class IndicesRequestCache implements RemovalListener INDICES_CACHE_QUERY_SIZE = Setting.memorySizeSetting(
         "indices.requests.cache.size",
         "1%",
-        Property.NodeScope
+        Property.NodeScope,
+        Property.Deprecated
     );
     public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting(
         "indices.requests.cache.expire",
@@ -166,7 +174,6 @@ public final class IndicesRequestCache implements RemovalListener registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
-    private final ByteSizeValue size;
     private final TimeValue expire;
     private final ICache cache;
     private final ClusterService clusterService;
@@ -187,10 +194,7 @@ public final class IndicesRequestCache implements RemovalListener, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed();
         this.cacheCleanupManager = new IndicesRequestCacheCleanupManager(
             threadPool,
             INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING.get(settings),
@@ -200,30 +204,42 @@ public final class IndicesRequestCache implements RemovalListener().setSettings(settings)
-            .setWeigher(weigher)
-            .setValueType(BytesReference.class)
-            .setKeyType(Key.class)
-            .setRemovalListener(this)
-            .setMaxSizeInBytes(sizeInBytes) // for backward compatibility
-            .setExpireAfterAccess(expire) // for backward compatibility
-            .setDimensionNames(List.of(INDEX_DIMENSION_NAME, SHARD_ID_DIMENSION_NAME))
-            .setCachedResultParser((bytesReference) -> {
-                try {
-                    return CachedQueryResult.getPolicyValues(bytesReference);
-                } catch (IOException e) {
-                    // Set took time to -1, which will always be rejected by the policy.
-                    return new CachedQueryResult.PolicyValues(-1);
-                }
-            })
-            .setKeySerializer(new IRCKeyWriteableSerializer())
-            .setValueSerializer(new BytesReferenceSerializer())
-            .setClusterSettings(clusterService.getClusterSettings())
-            .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache")
-            .build(),
-            CacheType.INDICES_REQUEST_CACHE
-        );
+
+        CacheConfig config = getCacheConfig(settings, nodeEnvironment);
+        this.cache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE);
+    }
+
+    // pkg-private for testing
+    CacheConfig getCacheConfig(Settings settings, NodeEnvironment nodeEnvironment) {
+        long sizeInBytes = INDICES_CACHE_QUERY_SIZE.get(settings).getBytes();
+        ToLongBiFunction<ICacheKey<Key>, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed();
+        CacheConfig.Builder configBuilder = new CacheConfig.Builder().setSettings(settings)
+            .setWeigher(weigher)
+            .setValueType(BytesReference.class)
+            .setKeyType(Key.class)
+            .setRemovalListener(this)
+            .setExpireAfterAccess(expire) // for backward compatibility
+            .setDimensionNames(List.of(INDEX_DIMENSION_NAME, SHARD_ID_DIMENSION_NAME))
+            .setCachedResultParser((bytesReference) -> {
+                try {
+                    return CachedQueryResult.getPolicyValues(bytesReference);
+                } catch (IOException e) {
+                    // Set took time to -1, which will always be rejected by the policy.
+                    return new CachedQueryResult.PolicyValues(-1);
+                }
+            })
+            .setKeySerializer(new IRCKeyWriteableSerializer())
+            .setValueSerializer(new BytesReferenceSerializer())
+            .setClusterSettings(clusterService.getClusterSettings())
+            .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache");
+
+        if (!CacheService.pluggableCachingEnabled(CacheType.INDICES_REQUEST_CACHE, settings)) {
+            // If pluggable caching is not enabled, pass the max size from the IRC setting into the config.
+            // If pluggable caching is enabled, cache implementations instead determine their own sizes based on their own
+            // size settings.
+            configBuilder.setMaxSizeInBytes(sizeInBytes);
+        }
+        return configBuilder.build();
     }
 
     // package private for testing
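Net effect of getCacheConfig: the deprecated indices.requests.cache.size value reaches the cache config only when pluggable caching is inactive; otherwise each store sizes itself from its own setting. Opting the request cache into a pluggable store therefore looks like this (sketch, using only calls that appear in the tests later in this patch):

    // Sketch: with these settings the IRC no longer passes a max size down;
    // the chosen store's own size setting applies instead.
    Settings settings = Settings.builder()
        .put(FeatureFlags.PLUGGABLE_CACHE, true)
        .put(
            CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
            OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
        )
        .build();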
diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
index 516aa94534f94..4442189373c93 100644
--- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
@@ -79,9 +79,9 @@ abstract class AuxTransport extends AbstractLifecycleComponent {
         public static final String AUX_SETTINGS_PREFIX = "aux.transport.";
         public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types";
         public static final String AUX_PORT_DEFAULTS = "9400-9500";
-        public static final Setting.AffixSetting<PortsRange> AUX_TRANSPORT_PORTS = affixKeySetting(
+        public static final Setting.AffixSetting<PortsRange> AUX_TRANSPORT_PORT = affixKeySetting(
             AUX_SETTINGS_PREFIX,
-            "ports",
+            "port",
             key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope)
         );
diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
index 45a7b273eb41e..5a989ad8ab777 100644
--- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
@@ -15,6 +15,7 @@
 import org.opensearch.common.cache.LoadAwareCacheLoader;
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.RemovalNotification;
+import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
 import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
 import org.opensearch.common.cache.store.config.CacheConfig;
@@ -105,35 +106,69 @@ public void testStatsWithoutPluggableCaches() throws Exception {
         }
     }
 
-    public void testWithCacheConfigSettings() {
-        MockRemovalListener listener = new MockRemovalListener<>();
-        int maxKeys = between(10, 50);
-        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
-        Settings settings = Settings.builder()
-            .put(
-                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
-                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
-                    .getKey(),
-                1000 + "b" // Setting some random value which shouldn't be honored.
-            )
+    public void testWithCacheConfigSizeSettings_WhenPluggableCachingOff() {
+        // The "pluggable caching off" case can happen when the PLUGGABLE_CACHE setting is false, or if the store name is blank.
+        // The cache should get its size from the config, not the setting, in either case.
+        Settings.Builder settingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, false);
+        long maxSizeFromSetting = between(1000, 2000);
+        long maxSizeFromConfig = between(3000, 4000);
+        OpenSearchOnHeapCache onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight());
+
+        Settings.Builder storeNameBlankSettingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, true);
+        onHeapCache = setupMaxSizeTest(storeNameBlankSettingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight());
+    }
+
+    public void testWithCacheConfigSettings_WhenPluggableCachingOn() {
+        // When pluggable caching is on, the cache should get its size from the config if present, and otherwise should get it from the
+        // setting.
+        Settings.Builder settingsBuilder = Settings.builder()
             .put(FeatureFlags.PLUGGABLE_CACHE, true)
-            .build();
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            );
+        long maxSizeFromSetting = between(1000, 2000);
+        long maxSizeFromConfig = between(3000, 4000);
+        OpenSearchOnHeapCache onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, false);
+        assertEquals(maxSizeFromSetting, onHeapCache.getMaximumWeight());
+
+        onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight());
+    }
 
-        CacheConfig cacheConfig = new CacheConfig.Builder().setKeyType(String.class)
+    private OpenSearchOnHeapCache setupMaxSizeTest(
+        Settings.Builder settingsBuilder,
+        long maxSizeFromSetting,
+        long maxSizeFromConfig,
+        boolean putSizeInConfig
+    ) {
+        MockRemovalListener listener = new MockRemovalListener<>();
+        settingsBuilder.put(
+            OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                .getKey(),
+            maxSizeFromSetting + "b"
+        );
+
+        CacheConfig.Builder cacheConfigBuilder = new CacheConfig.Builder().setKeyType(String.class)
             .setValueType(String.class)
             .setWeigher((k, v) -> keyValueSize)
             .setRemovalListener(listener)
-            .setSettings(settings)
+            .setSettings(settingsBuilder.build())
             .setDimensionNames(dimensionNames)
-            .setMaxSizeInBytes(maxKeys * keyValueSize) // this should get honored
-            .setStatsTrackingEnabled(true)
-            .build();
-        OpenSearchOnHeapCache onHeapCache = (OpenSearchOnHeapCache) onHeapCacheFactory.create(
-            cacheConfig,
+            .setStatsTrackingEnabled(true);
+        if (putSizeInConfig) {
+            cacheConfigBuilder.setMaxSizeInBytes(maxSizeFromConfig);
+        }
+
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        return (OpenSearchOnHeapCache) onHeapCacheFactory.create(
+            cacheConfigBuilder.build(),
             CacheType.INDICES_REQUEST_CACHE,
             null
         );
-        assertEquals(maxKeys * keyValueSize, onHeapCache.getMaximumWeight());
     }
 
     private void assertZeroStats(ImmutableCacheStatsHolder stats) {
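The two tests above pin down the precedence rules for the on-heap cache's maximum weight. Summarized as a hypothetical helper (not production code):

    // Precedence as exercised by the tests above:
    // - pluggable caching off: the CacheConfig value always wins.
    // - pluggable caching on: the CacheConfig value wins if set (> 0), else the
    //   {cache_type}.opensearch_onheap.size setting applies.
    static long effectiveMaxSize(boolean pluggableCachingEnabled, long configSize, long settingSize) {
        if (pluggableCachingEnabled && configSize <= 0) {
            return settingSize;
        }
        return configSize;
    }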
diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
index 78782112be844..c90924cfc0fd1 100644
--- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
+++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
@@ -81,6 +81,9 @@ public void testIndicesRequestCacheSetting() {
             "indices.requests.cache.size",
             new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.01))
         );
+        assertWarnings(
+            "[indices.requests.cache.size] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version."
+        );
     }
 
     public void testCircuitBreakerSettings() {
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
index ed0d6b7d50706..77dfd5b27581d 100644
--- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
@@ -206,6 +206,80 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
         assertEquals(4, fileTransferTracker.allUploaded().size());
     }
 
+    public void testTransferSnapshotOnFileTransferUploadFail() throws Exception {
+        AtomicInteger fileTransferSucceeded = new AtomicInteger();
+        AtomicInteger fileTransferFailed = new AtomicInteger();
+        AtomicInteger translogTransferSucceeded = new AtomicInteger();
+        AtomicInteger translogTransferFailed = new AtomicInteger();
+
+        doAnswer(invocationOnMock -> {
+            ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2];
+            Set transferFileSnapshots = (Set) invocationOnMock.getArguments()[0];
+
+            TransferFileSnapshot actualFileSnapshot = transferFileSnapshots.iterator().next();
+            FileTransferException testException = new FileTransferException(
+                actualFileSnapshot,
+                new RuntimeException("FileTransferUploadNeedsToFail-Exception")
+            );
+
+            listener.onFailure(testException);
+            transferFileSnapshots.stream().skip(1).forEach(listener::onResponse);
+            return null;
+        }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class));
+
+        FileTransferTracker fileTransferTracker = new FileTransferTracker(
+            new ShardId("index", "indexUUid", 0),
+            remoteTranslogTransferTracker
+        ) {
+            @Override
+            public void onSuccess(TransferFileSnapshot fileSnapshot) {
+                fileTransferSucceeded.incrementAndGet();
+                super.onSuccess(fileSnapshot);
+            }
+
+            @Override
+            public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) {
+                fileTransferFailed.incrementAndGet();
+                super.onFailure(fileSnapshot, e);
+            }
+        };
+
+        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
+            shardId,
+            transferService,
+            remoteBaseTransferPath.add(TRANSLOG.getName()),
+            remoteBaseTransferPath.add(METADATA.getName()),
+            fileTransferTracker,
+            remoteTranslogTransferTracker,
+            DefaultRemoteStoreSettings.INSTANCE,
+            isTranslogMetadataEnabled
+        );
+
+        SetOnce<Exception> exception = new SetOnce<>();
+        assertFalse(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() {
+            @Override
+            public void onUploadComplete(TransferSnapshot transferSnapshot) {
+                translogTransferSucceeded.incrementAndGet();
+            }
+
+            @Override
+            public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
+                translogTransferFailed.incrementAndGet();
+                exception.set(ex);
+            }
+        }));
+
+        assertNotNull(exception.get());
+        assertTrue(exception.get() instanceof TranslogUploadFailedException);
+        assertEquals("Failed to upload 1 files during transfer", exception.get().getMessage());
+        assertEquals(0, exception.get().getSuppressed().length);
+        assertEquals(3, fileTransferSucceeded.get());
+        assertEquals(1, fileTransferFailed.get());
+        assertEquals(0, translogTransferSucceeded.get());
+        assertEquals(1, translogTransferFailed.get());
+        assertEquals(3, fileTransferTracker.allUploaded().size());
+    }
+
     public void testTransferSnapshotOnUploadTimeout() throws Exception {
         doAnswer(invocationOnMock -> {
             Set transferFileSnapshots = invocationOnMock.getArgument(0);
diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
index 1a3aece74b3e2..e83ca247b6a1d 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
@@ -53,12 +53,16 @@
 import org.opensearch.cluster.routing.ShardRoutingHelper;
 import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.common.CheckedSupplier;
+import org.opensearch.common.cache.CacheType;
 import org.opensearch.common.cache.ICacheKey;
 import org.opensearch.common.cache.RemovalNotification;
 import org.opensearch.common.cache.RemovalReason;
 import org.opensearch.common.cache.module.CacheModule;
+import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
 import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
+import org.opensearch.common.cache.store.OpenSearchOnHeapCache;
+import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
 import org.opensearch.common.settings.Settings;
@@ -852,6 +856,42 @@ public void testAddingToCleanupKeyToCountMapWorksAppropriatelyWithMultipleThread
         assertFalse(concurrentModificationExceptionDetected.get());
     }
 
+    public void testCacheMaxSize_WhenPluggableCachingOff() throws Exception {
+        // If pluggable caching is off, the IRC should put a max size value into the cache config that it uses to create its cache.
+        threadPool = getThreadPool();
+        long cacheSize = 1000;
+        Settings settings = Settings.builder().put(INDICES_CACHE_QUERY_SIZE.getKey(), cacheSize + "b").build();
+        cache = getIndicesRequestCache(settings);
+        CacheConfig config;
+        try (NodeEnvironment env = newNodeEnvironment(settings)) {
+            // For the purposes of this test it doesn't matter if the node environment matches the one used in the constructor
+            config = cache.getCacheConfig(settings, env);
+        }
+        assertEquals(cacheSize, (long) config.getMaxSizeInBytes());
+        allowDeprecationWarning();
+    }
+
+    public void testCacheMaxSize_WhenPluggableCachingOn() throws Exception {
+        // If pluggable caching is on, and a store name is present, the IRC should NOT put a max size value into the cache config.
+        threadPool = getThreadPool();
+        Settings settings = Settings.builder()
+            .put(INDICES_CACHE_QUERY_SIZE.getKey(), 1000 + "b")
+            .put(FeatureFlags.PLUGGABLE_CACHE, true)
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            )
+            .build();
+        cache = getIndicesRequestCache(settings);
+        CacheConfig config;
+        try (NodeEnvironment env = newNodeEnvironment(settings)) {
+            // For the purposes of this test it doesn't matter if the node environment matches the one used in the constructor
+            config = cache.getCacheConfig(settings, env);
+        }
+        assertEquals(0, (long) config.getMaxSizeInBytes());
+        allowDeprecationWarning();
+    }
+
     private IndicesRequestCache getIndicesRequestCache(Settings settings) throws IOException {
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         try (NodeEnvironment env = newNodeEnvironment(settings)) {
@@ -1095,6 +1135,7 @@ public void testEviction() throws Exception {
         assertEquals(2, cache.count());
         assertEquals(1, indexShard.requestCache().stats().getEvictions());
         IOUtils.close(reader, secondReader, thirdReader, environment);
+        allowDeprecationWarning();
     }
 
     public void testClearAllEntityIdentity() throws Exception {
@@ -1372,6 +1413,7 @@ public void testGetOrComputeConcurrentlyWithMultipleIndices() throws Exception {
         }
         IOUtils.close(cache);
         executorService.shutdownNow();
+        allowDeprecationWarning();
     }
 
     public void testDeleteAndCreateIndexShardOnSameNodeAndVerifyStats() throws Exception {
@@ -1540,6 +1582,12 @@ public static String generateString(int length) {
         return sb.toString();
     }
 
+    private void allowDeprecationWarning() {
+        assertWarnings(
+            "[indices.requests.cache.size] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version."
+        );
+    }
+
     private class TestBytesReference extends AbstractBytesReference {
         int dummyValue