diff --git a/CHANGELOG.md b/CHANGELOG.md index 7dbc2bfe54510..ec946c3b766ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -120,4 +120,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.4...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.4...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.4...2.x diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 8cd60084ad710..e9333ffb58148 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,6 +48,10 @@ */ public class LegacyESVersion extends Version { + public static final LegacyESVersion V_6_0_0 = new LegacyESVersion(6000099, org.apache.lucene.util.Version.fromBits(7, 0, 0)); + public static final LegacyESVersion V_6_5_0 = new LegacyESVersion(6050099, org.apache.lucene.util.Version.fromBits(7, 0, 0)); + public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); + // todo move back to Version.java if retiring legacy version support protected static final ImmutableOpenIntMap idToVersion; protected static final ImmutableOpenMap stringToVersion; @@ -237,4 +241,22 @@ public String toString() { } return sb.toString(); } + + @Override + protected Version computeMinIndexCompatVersion() { + final int prevLuceneVersionMajor = this.luceneVersion.major - 1; + final int bwcMajor; + if (major == 5) { + bwcMajor = 2; // we jumped from 2 to 5 + } else if (major == 7) { + return LegacyESVersion.fromId(6000026); + } else { + bwcMajor = major - 1; + } + final int bwcMinor = 0; + return new LegacyESVersion( + bwcMajor * 1000000 + bwcMinor * 10000 + 99, + org.apache.lucene.util.Version.fromBits(prevLuceneVersionMajor, 0, 0) + ); + } } diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 8dd6776b2d5f4..77286c42dc4c2 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -316,8 +316,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * lazily once. 
*/ static class DeclaredVersionsHolder { - // use LegacyESVersion.class since it inherits Version fields - protected static final List DECLARED_VERSIONS = Collections.unmodifiableList(getDeclaredVersions(LegacyESVersion.class)); + protected static final List DECLARED_VERSIONS = Collections.unmodifiableList(getDeclaredVersions(Version.class)); } // lazy initialized because we don't yet have the declared versions ready when instantiating the cached Version @@ -394,7 +393,7 @@ public Version minimumIndexCompatibilityVersion() { return res; } - private Version computeMinIndexCompatVersion() { + protected Version computeMinIndexCompatVersion() { final int bwcMajor; if (major == 5) { bwcMajor = 2; // we jumped from 2 to 5 diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index f36fd2d42f980..167a1cc0fab98 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -401,8 +401,7 @@ public Builder addBlocks(IndexMetadata indexMetadata) { if (IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.get(indexMetadata.getSettings())) { addIndexBlock(indexName, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); } - if (IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey() - .equals(indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { + if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { addIndexBlock(indexName, IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE); } return this; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index d32b933b558f0..0161f4376c168 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -37,6 +37,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.Assertions; +import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.indices.rollover.RolloverInfo; import org.opensearch.action.support.ActiveShardCount; @@ -1840,13 +1841,17 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti throw new IllegalArgumentException("Unexpected token " + token); } } - if (Assertions.ENABLED) { + + final Version indexCreatedVersion = Version.indexCreated(builder.settings); + // Reference: + // https://github.com/opensearch-project/OpenSearch/blob/4dde0f2a3b445b2fc61dab29c5a2178967f4a3e3/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java#L1620-L1628 + if (Assertions.ENABLED && indexCreatedVersion.onOrAfter(LegacyESVersion.V_6_5_0)) { assert mappingVersion : "mapping version should be present for indices"; - } - if (Assertions.ENABLED) { assert settingsVersion : "settings version should be present for indices"; } - if (Assertions.ENABLED) { + // Reference: + // https://github.com/opensearch-project/OpenSearch/blob/2e4b27b243d8bd2c515f66cf86c6d1d6a601307f/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java#L1824 + if (Assertions.ENABLED && indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_2_0)) { assert aliasesVersion : "aliases version should be present for indices"; } return 
builder.build(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java index 1a3c366694221..a4ff237460e28 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingPool.java @@ -61,7 +61,7 @@ public static RoutingPool getShardPool(ShardRouting shard, RoutingAllocation all */ public static RoutingPool getIndexPool(IndexMetadata indexMetadata) { Settings indexSettings = indexMetadata.getSettings(); - if (IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(indexSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { + if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { return REMOTE_CAPABLE; } return LOCAL_ONLY; diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 1f9fe917158b9..a575f8622c151 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -55,6 +55,7 @@ import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; @@ -134,6 +135,25 @@ public static SegmentInfos readSegmentInfos(Directory directory) throws IOExcept return SegmentInfos.readLatestCommit(directory); } + /** + * A variant of {@link #readSegmentInfos(Directory)} that supports reading indices written by + * older major versions of Lucene. The underlying implementation is a workaround since the + * "expert" readLatestCommit API is currently package-private in Lucene. First, all commits in + * the given {@link Directory} are listed - this result includes older Lucene commits. Then, + * the latest index commit is opened via {@link DirectoryReader} by including a minimum supported + * Lucene major version based on the minimum compatibility of the given {@link org.opensearch.Version}. 
+ */ + public static SegmentInfos readSegmentInfosExtendedCompatibility(Directory directory, org.opensearch.Version minimumVersion) + throws IOException { + // This list is sorted from oldest to latest + List<IndexCommit> indexCommits = DirectoryReader.listCommits(directory); + IndexCommit latestCommit = indexCommits.get(indexCommits.size() - 1); + final int minSupportedLuceneMajor = minimumVersion.minimumIndexCompatibilityVersion().luceneVersion.major; + try (StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(latestCommit, minSupportedLuceneMajor, null)) { + return reader.getSegmentInfos(); + } + } + /** * Returns an iterable that allows to iterate over all files in this segments info */ diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 704bd78d4b556..72b7349180bad 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -41,6 +41,13 @@ public class FeatureFlags { */ public static final String SEARCHABLE_SNAPSHOT = "opensearch.experimental.feature.searchable_snapshot.enabled"; + /** + * Gates the ability for Searchable Snapshots to read snapshots that are older than the + * guaranteed backward compatibility for OpenSearch (one prior major version) on a best-effort basis. + */ + public static final String SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY = + "opensearch.experimental.feature.searchable_snapshot.extended_compatibility.enabled"; + /** * Gates the functionality of extensions. * Once the feature is ready for production release, this feature flag can be removed. diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index e9125256438a5..5b9b051af8e8b 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -461,11 +461,19 @@ public boolean match(String setting) { } /** - * Convenience method to check whether the given IndexSettings contains - * an {@link #INDEX_STORE_TYPE_SETTING} set to the value of this type. + * Convenience method to check whether the given {@link IndexSettings} + * object contains an {@link #INDEX_STORE_TYPE_SETTING} set to the value of this type. */ public boolean match(IndexSettings settings) { - return match(INDEX_STORE_TYPE_SETTING.get(settings.getSettings())); + return match(settings.getSettings()); + } + + /** + * Convenience method to check whether the given {@link Settings} * object contains an {@link #INDEX_STORE_TYPE_SETTING} set to the value of this type. 
+ */ + public boolean match(Settings settings) { + return match(INDEX_STORE_TYPE_SETTING.get(settings)); } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index be7e63a5c9068..dc54ace237070 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -45,6 +45,7 @@ import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; @@ -59,11 +60,13 @@ import java.util.function.Function; import java.util.function.UnaryOperator; +import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; +import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; /** * This class encapsulates all index level settings and handles settings updates. @@ -585,6 +588,9 @@ public final class IndexSettings { private final boolean isRemoteTranslogStoreEnabled; private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; + private final boolean isRemoteSnapshot; + private Version extendedCompatibilitySnapshotVersion; + // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -748,6 +754,14 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY); + isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); + + if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { + extendedCompatibilitySnapshotVersion = SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; + } else { + extendedCompatibilitySnapshotVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); + } + this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); @@ -1017,6 +1031,23 @@ public String getRemoteStoreTranslogRepository() { return remoteStoreTranslogRepository; } + /** + * Returns true if this is remote/searchable snapshot + */ + public boolean isRemoteSnapshot() { + return isRemoteSnapshot; + } + + /** + * If this is a remote snapshot and the extended compatibility + * feature flag is enabled, this returns the minimum {@link 
Version} + * supported. In all other cases, the return value is the + * {@link Version#minimumIndexCompatibilityVersion()} of {@link Version#CURRENT}. + */ + public Version getExtendedCompatibilitySnapshotVersion() { + return extendedCompatibilitySnapshotVersion; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index dceb26bc33aa7..dcc98c36675b4 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.opensearch.Version; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -89,6 +90,7 @@ public class ReadOnlyEngine extends Engine { private final CompletionStatsCache completionStatsCache; private final boolean requireCompleteHistory; private final TranslogManager translogManager; + private final Version minimumSupportedVersion; protected volatile TranslogStats translogStats; @@ -115,6 +117,8 @@ public ReadOnlyEngine( ) { super(config); this.requireCompleteHistory = requireCompleteHistory; + // fetch the minimum Version for extended backward compatibility use-cases + this.minimumSupportedVersion = config.getIndexSettings().getExtendedCompatibilitySnapshotVersion(); try { Store store = config.getStore(); store.incRef(); @@ -126,7 +130,11 @@ public ReadOnlyEngine( // we obtain the IW lock even though we never modify the index. // yet this makes sure nobody else does. including some testing tools that try to be messy indexWriterLock = obtainLock ? 
directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null; - this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); + if (isExtendedCompatibility()) { + this.lastCommittedSegmentInfos = Lucene.readSegmentInfosExtendedCompatibility(directory, this.minimumSupportedVersion); + } else { + this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); + } if (seqNoStats == null) { seqNoStats = buildSeqNoStats(config, lastCommittedSegmentInfos); ensureMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats); @@ -215,7 +223,17 @@ protected final OpenSearchDirectoryReader wrapReader( protected DirectoryReader open(IndexCommit commit) throws IOException { assert Transports.assertNotTransportThread("opening index commit of a read-only engine"); - return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(commit), Lucene.SOFT_DELETES_FIELD); + DirectoryReader reader; + if (isExtendedCompatibility()) { + reader = DirectoryReader.open(commit, this.minimumSupportedVersion.luceneVersion.major, null); + } else { + reader = DirectoryReader.open(commit); + } + return new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); + } + + private boolean isExtendedCompatibility() { + return Version.CURRENT.minimumIndexCompatibilityVersion().onOrAfter(this.minimumSupportedVersion); } @Override diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 4be11badd0879..8b85665a6f06d 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2067,7 +2067,7 @@ public void openEngineAndRecoverFromTranslog() throws IOException { }; // Do not load the global checkpoint if this is a remote snapshot index - if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings) == false) { + if (indexSettings.isRemoteSnapshot() == false) { loadGlobalCheckpointToReplicationTracker(); } @@ -2126,7 +2126,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t } private boolean assertSequenceNumbersInCommit() throws IOException { - final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData(); + final Map<String, String> userData = fetchUserData(); assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint"; assert userData.containsKey(MAX_SEQ_NO) : "commit point doesn't contains a maximum sequence number"; assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contains a history uuid"; @@ -2141,6 +2141,16 @@ private boolean assertSequenceNumbersInCommit() throws IOException { return true; } + private Map<String, String> fetchUserData() throws IOException { + if (indexSettings.isRemoteSnapshot() && indexSettings.getExtendedCompatibilitySnapshotVersion() != null) { + // Inefficient method to support reading old Lucene indexes + return Lucene.readSegmentInfosExtendedCompatibility(store.directory(), indexSettings.getExtendedCompatibilitySnapshotVersion()) + .getUserData(); + } else { + return SegmentInfos.readLatestCommit(store.directory()).getUserData(); + } + } + private void onNewEngine(Engine newEngine) { assert Thread.holdsLock(engineMutex); refreshListeners.setCurrentRefreshLocationSupplier(newEngine.translogManager()::getTranslogLastWriteLocation); diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 3354f7e8dbacb..de12d9b75dc12 100644 --- 
a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -221,7 +221,11 @@ public Directory directory() { public SegmentInfos readLastCommittedSegmentsInfo() throws IOException { failIfCorrupted(); try { - return readSegmentsInfo(null, directory()); + if (indexSettings.isRemoteSnapshot() && indexSettings.getExtendedCompatibilitySnapshotVersion() != null) { + return readSegmentInfosExtendedCompatibility(directory(), indexSettings.getExtendedCompatibilitySnapshotVersion()); + } else { + return readSegmentsInfo(null, directory()); + } } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { markStoreCorrupted(ex); throw ex; @@ -229,7 +233,9 @@ public SegmentInfos readLastCommittedSegmentsInfo() throws IOException { } /** - * Returns the segments info for the given commit or for the latest commit if the given commit is null + * Returns the segments info for the given commit or for the latest commit if the given commit is null. + * This method will throw an exception if the index is older than the standard backwards compatibility + * policy ( current major - 1). See also {@link #readSegmentInfosExtendedCompatibility(Directory, org.opensearch.Version)}. * * @throws IOException if the index is corrupted or the segments file is not present */ @@ -245,7 +251,27 @@ private static SegmentInfos readSegmentsInfo(IndexCommit commit, Directory direc } catch (Exception ex) { throw new CorruptIndexException("Hit unexpected exception while reading segment infos", "commit(" + commit + ")", ex); } + } + /** + * Returns the segments info for the latest commit in the given directory. Unlike + * {@link #readSegmentsInfo(IndexCommit, Directory)}, this method supports reading + * older Lucene indices on a best-effort basis. 
+ * + * @throws IOException if the index is corrupted or the segments file is not present + */ + private static SegmentInfos readSegmentInfosExtendedCompatibility(Directory directory, org.opensearch.Version minimumVersion) + throws IOException { + try { + return Lucene.readSegmentInfosExtendedCompatibility(directory, minimumVersion); + } catch (EOFException eof) { + // TODO this should be caught by lucene - EOF is almost certainly an index corruption + throw new CorruptIndexException("Read past EOF while reading segment infos", "", eof); + } catch (IOException exception) { + throw exception; // IOExceptions like too many open files are not necessarily a corruption - just bubble it up + } catch (Exception ex) { + throw new CorruptIndexException("Hit unexpected exception while reading segment infos", "", ex); + } } final void ensureOpen() { diff --git a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java index 3a2749a6d325b..840dc37569081 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java @@ -22,6 +22,8 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; import org.apache.lucene.store.NoLockFactory; +import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.opensearch.index.store.remote.file.OnDemandBlockSnapshotIndexInput; import org.opensearch.index.store.remote.file.OnDemandVirtualFileSnapshotIndexInput; @@ -35,6 +37,9 @@ * @opensearch.internal */ public final class RemoteSnapshotDirectory extends Directory { + + public static final Version SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION = LegacyESVersion.V_6_0_0; + private static final String VIRTUAL_FILE_PREFIX = BlobStoreRepository.VIRTUAL_DATA_BLOB_PREFIX; private final Map fileInfoMap; diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index e015bd11b11db..4644a09f3e03c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -911,7 +911,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isSegRepEnabled()) { return new NRTReplicationEngineFactory(); } - if (IndexModule.Type.REMOTE_SNAPSHOT.match(idxSettings)) { + if (idxSettings.isRemoteSnapshot()) { return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); } return new InternalEngineFactory(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 4cc599d968624..1c5025ad834f7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -53,7 +53,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; 
import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.mapper.MapperException; @@ -245,7 +244,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); - final boolean hasNoTranslog = IndexModule.Type.REMOTE_SNAPSHOT.match(indexShard.indexSettings()); + final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog) == false; final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog); assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 5ae3a5d58e858..c8cc5c4409e6b 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -44,7 +44,6 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.index.IndexModule; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.seqno.ReplicationTracker; @@ -365,7 +364,7 @@ public void cleanFiles( // their own commit points and therefore do not modify the commit user data // in their store. In these cases, reuse the primary's translog UUID. final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabled() - || IndexModule.Type.REMOTE_SNAPSHOT.match(indexShard.indexSettings()); + || indexShard.indexSettings().isRemoteSnapshot(); if (reuseTranslogUUID) { final String translogUUID = store.getMetadata().getCommitUserData().get(TRANSLOG_UUID_KEY); Translog.createEmptyTranslog( diff --git a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java index e8f96bb313dd1..25cb0eaf43455 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java @@ -92,7 +92,9 @@ public String getIndexMetaBlobId(String metaIdentifier) { } /** - * Get the blob id by {@link SnapshotId} and {@link IndexId}. + * Get the blob id by {@link SnapshotId} and {@link IndexId}. If none is found, we fall back to the value + * of {@link SnapshotId#getUUID()} to allow for extended backwards compatibility use-cases with + * {@link org.opensearch.LegacyESVersion} versions which used the snapshot UUID as the index metadata blob id. 
* * @param snapshotId Snapshot Id * @param indexId Index Id @@ -100,7 +102,11 @@ public String getIndexMetaBlobId(String metaIdentifier) { */ public String indexMetaBlobId(SnapshotId snapshotId, IndexId indexId) { final String identifier = lookup.getOrDefault(snapshotId, Collections.emptyMap()).get(indexId); - return identifiers.get(identifier); + if (identifier == null) { + return snapshotId.getUUID(); + } else { + return identifiers.get(identifier); + } } /** diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 3f0ec8ff05956..f064d7b70c9ef 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -2419,7 +2419,6 @@ public void snapshotShard( newSnapshotsList.add(point); } final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); - final Runnable afterWriteSnapBlob; // When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data // for this shard since the uuid named blob will simply not be referenced in case of error and thus we will never // reference a generation that has not had all its files fully upload. @@ -2437,7 +2436,6 @@ public void snapshotShard( e ); } - afterWriteSnapBlob = () -> {}; final StepListener> allFilesUploadedListener = new StepListener<>(); allFilesUploadedListener.whenComplete(v -> { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); @@ -2462,7 +2460,6 @@ public void snapshotShard( } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); } - afterWriteSnapBlob.run(); snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), indexGeneration); listener.onResponse(indexGeneration); }, listener::onFailure); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java index d6cffcfbb8db8..8217e73c01a3c 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java @@ -43,6 +43,7 @@ import org.opensearch.index.snapshots.blobstore.SnapshotFiles; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; @@ -198,6 +199,10 @@ public void restore(SnapshotFiles snapshotFiles, Store store, ActionListener shardsBuilder = ImmutableOpenMap .builder(); - final Version minIndexCompatibilityVersion = currentState.getNodes() - .getMaxNodeVersion() - .minimumIndexCompatibilityVersion(); + for (Map.Entry indexEntry : indices.entrySet()) { String renamedIndexName = indexEntry.getKey(); String index = indexEntry.getValue(); @@ -445,7 +445,7 @@ public ClusterState execute(ClusterState currentState) { request.ignoreIndexSettings() ); final boolean isSearchableSnapshot = FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) - && IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(request.storageType().toString()); + && 
IndexModule.Type.REMOTE_SNAPSHOT.match(request.storageType().toString()); if (isSearchableSnapshot) { snapshotIndexMetadata = addSnapshotToIndexSettings( snapshotIndexMetadata, @@ -460,6 +460,15 @@ public ClusterState execute(ClusterState currentState) { repositoryData.resolveIndexId(index), isSearchableSnapshot ); + final Version minIndexCompatibilityVersion; + if (isSearchableSnapshot && isSearchableSnapshotsExtendedCompatibilityEnabled()) { + minIndexCompatibilityVersion = SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION + .minimumIndexCompatibilityVersion(); + } else { + minIndexCompatibilityVersion = currentState.getNodes() + .getMaxNodeVersion() + .minimumIndexCompatibilityVersion(); + } try { snapshotIndexMetadata = metadataIndexUpgradeService.upgradeIndexMetadata( snapshotIndexMetadata, @@ -1238,4 +1247,9 @@ private static IndexMetadata addSnapshotToIndexSettings(IndexMetadata metadata, .build(); return IndexMetadata.builder(metadata).settings(newSettings).build(); } + + private static boolean isSearchableSnapshotsExtendedCompatibilityEnabled() { + return org.opensearch.Version.CURRENT.after(org.opensearch.Version.V_2_4_0) + && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY); + } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java index 5c2efba008652..2be7cf9d4dbb3 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java @@ -173,7 +173,7 @@ public static void validateSnapshotsBackingAnyIndex( for (ObjectCursor cursor : metadata.values()) { IndexMetadata indexMetadata = cursor.value; String storeType = indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()); - if (IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(storeType)) { + if (IndexModule.Type.REMOTE_SNAPSHOT.match(storeType)) { String snapshotId = indexMetadata.getSettings().get(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey()); if (uuidToSnapshotId.get(snapshotId) != null) { snapshotsToBeNotDeleted.add(uuidToSnapshotId.get(snapshotId).getName()); diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index bb5ab4228e264..cb6874b00c161 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -119,10 +119,9 @@ public void testMax() { } public void testMinimumIndexCompatibilityVersion() { - // note: all Legacy compatibility support will be removed in OpenSearch 3.0 - assertEquals(LegacyESVersion.fromId(7000099), Version.fromId(2000099).minimumIndexCompatibilityVersion()); - assertEquals(LegacyESVersion.fromId(7000099), Version.fromId(2010000).minimumIndexCompatibilityVersion()); - assertEquals(LegacyESVersion.fromId(7000099), Version.fromId(2000001).minimumIndexCompatibilityVersion()); + assertEquals(LegacyESVersion.fromId(7000099), Version.fromId(2000099 ^ MASK).minimumIndexCompatibilityVersion()); + assertEquals(LegacyESVersion.fromId(7000099), Version.fromId(2010000 ^ MASK).minimumIndexCompatibilityVersion()); + assertEquals(LegacyESVersion.fromId(7000099), Version.fromId(2000001 ^ MASK).minimumIndexCompatibilityVersion()); } public void testVersionConstantPresent() { diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java index 
2b54455f589fd..966c47b2f42d9 100644 --- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java @@ -31,6 +31,10 @@ package org.opensearch.common.lucene; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.document.Document; @@ -71,8 +75,11 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.tests.store.MockDirectoryWrapper; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.internal.io.IOUtils; @@ -87,6 +94,7 @@ import java.io.IOException; import java.io.StringReader; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -318,6 +326,92 @@ public void testNumDocs() throws IOException { dir.close(); } + /** + * Tests whether old segments are readable and queryable based on the data documented + * in the README here. + * + * @throws IOException + */ + public void testReadSegmentInfosExtendedCompatibility() throws IOException { + final String pathToTestIndex = "/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip"; + final Version minVersion = LegacyESVersion.V_6_0_0; + Path tmp = createTempDir(); + TestUtil.unzip(getClass().getResourceAsStream(pathToTestIndex), tmp); + try (MockDirectoryWrapper dir = newMockFSDirectory(tmp)) { + // The standard API will throw an exception + expectThrows(IndexFormatTooOldException.class, () -> Lucene.readSegmentInfos(dir)); + SegmentInfos si = Lucene.readSegmentInfosExtendedCompatibility(dir, minVersion); + assertEquals(1, Lucene.getNumDocs(si)); + IndexCommit indexCommit = Lucene.getIndexCommit(si, dir); + // uses the "expert" Lucene API + try ( + StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open( + indexCommit, + minVersion.minimumIndexCompatibilityVersion().luceneVersion.major, + null + ) + ) { + IndexSearcher searcher = newSearcher(reader); + // radius too small, should get no results + assertFalse(Lucene.exists(searcher, LatLonPoint.newDistanceQuery("testLocation", 48.57532, -112.87695, 2))); + assertTrue(Lucene.exists(searcher, LatLonPoint.newDistanceQuery("testLocation", 48.57532, -112.87695, 20000))); + } + } + } + + /** + * Since the implementation in {@link Lucene#readSegmentInfosExtendedCompatibility(Directory, Version)} + * is a workaround, this test verifies that the response from this method is equivalent to + * {@link Lucene#readSegmentInfos(Directory)} if the version is N-1 + */ + public void testReadSegmentInfosExtendedCompatibilityBaseCase() throws IOException { + MockDirectoryWrapper dir = newMockDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + IndexWriter writer = new IndexWriter(dir, iwc); + Document doc = new Document(); + doc.add(new TextField("id", "1", random().nextBoolean() ? 
Field.Store.YES : Field.Store.NO)); + writer.addDocument(doc); + writer.commit(); + SegmentInfos expectedSI = Lucene.readSegmentInfos(dir); + SegmentInfos actualSI = Lucene.readSegmentInfosExtendedCompatibility(dir, Version.CURRENT); + assertEquals(Lucene.getNumDocs(expectedSI), Lucene.getNumDocs(actualSI)); + assertEquals(expectedSI.getGeneration(), actualSI.getGeneration()); + assertEquals(expectedSI.getSegmentsFileName(), actualSI.getSegmentsFileName()); + assertEquals(expectedSI.getVersion(), actualSI.getVersion()); + assertEquals(expectedSI.getCommitLuceneVersion(), actualSI.getCommitLuceneVersion()); + assertEquals(expectedSI.getMinSegmentLuceneVersion(), actualSI.getMinSegmentLuceneVersion()); + assertEquals(expectedSI.getIndexCreatedVersionMajor(), actualSI.getIndexCreatedVersionMajor()); + assertEquals(expectedSI.getUserData(), actualSI.getUserData()); + + int numDocsToIndex = randomIntBetween(10, 50); + List deleteTerms = new ArrayList<>(); + for (int i = 0; i < numDocsToIndex; i++) { + doc = new Document(); + doc.add(new TextField("id", "doc_" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + deleteTerms.add(new Term("id", "doc_" + i)); + writer.addDocument(doc); + } + int numDocsToDelete = randomIntBetween(0, numDocsToIndex); + Collections.shuffle(deleteTerms, random()); + for (int i = 0; i < numDocsToDelete; i++) { + Term remove = deleteTerms.remove(0); + writer.deleteDocuments(remove); + } + writer.commit(); + expectedSI = Lucene.readSegmentInfos(dir); + actualSI = Lucene.readSegmentInfosExtendedCompatibility(dir, Version.CURRENT); + assertEquals(Lucene.getNumDocs(expectedSI), Lucene.getNumDocs(actualSI)); + assertEquals(expectedSI.getGeneration(), actualSI.getGeneration()); + assertEquals(expectedSI.getSegmentsFileName(), actualSI.getSegmentsFileName()); + assertEquals(expectedSI.getVersion(), actualSI.getVersion()); + assertEquals(expectedSI.getCommitLuceneVersion(), actualSI.getCommitLuceneVersion()); + assertEquals(expectedSI.getMinSegmentLuceneVersion(), actualSI.getMinSegmentLuceneVersion()); + assertEquals(expectedSI.getIndexCreatedVersionMajor(), actualSI.getIndexCreatedVersionMajor()); + assertEquals(expectedSI.getUserData(), actualSI.getUserData()); + writer.close(); + dir.close(); + } + public void testCount() throws Exception { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index 8b53e5fe51635..b145ec0d79311 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -34,8 +34,9 @@ import org.opensearch.common.inject.ModuleTestCase; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.util.FeatureFlagTests; import org.hamcrest.Matchers; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.FeatureFlagSetter; import java.util.Arrays; @@ -239,46 +240,48 @@ public void testOldMaxClauseCountSetting() { ); } - public void testDynamicNodeSettingsRegistration() { - FeatureFlagTests.enableFeature(); - Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); - SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); - 
assertNotNull(module.getClusterSettings().get("some.custom.setting")); - // For unregistered setting the value is expected to be null - assertNull(module.getClusterSettings().get("some.custom.setting2")); - assertInstanceBinding(module, Settings.class, (s) -> s == settings); + public void testDynamicNodeSettingsRegistration() throws Exception { + try (FeatureFlagSetter f = FeatureFlagSetter.set(FeatureFlags.EXTENSIONS)) { + Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); + SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); + assertNotNull(module.getClusterSettings().get("some.custom.setting")); + // For unregistered setting the value is expected to be null + assertNull(module.getClusterSettings().get("some.custom.setting2")); + assertInstanceBinding(module, Settings.class, (s) -> s == settings); - assertTrue(module.registerDynamicSetting(Setting.floatSetting("some.custom.setting2", 1.0f, Property.NodeScope))); - assertNotNull(module.getClusterSettings().get("some.custom.setting2")); - // verify if some.custom.setting still exists - assertNotNull(module.getClusterSettings().get("some.custom.setting")); + assertTrue(module.registerDynamicSetting(Setting.floatSetting("some.custom.setting2", 1.0f, Property.NodeScope))); + assertNotNull(module.getClusterSettings().get("some.custom.setting2")); + // verify if some.custom.setting still exists + assertNotNull(module.getClusterSettings().get("some.custom.setting")); - // verify exception is thrown when setting registration fails - expectThrows( - SettingsException.class, - () -> module.registerDynamicSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)) - ); + // verify exception is thrown when setting registration fails + expectThrows( + SettingsException.class, + () -> module.registerDynamicSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)) + ); + } } - public void testDynamicIndexSettingsRegistration() { - FeatureFlagTests.enableFeature(); - Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); - SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); - assertNotNull(module.getClusterSettings().get("some.custom.setting")); - // For unregistered setting the value is expected to be null - assertNull(module.getIndexScopedSettings().get("index.custom.setting2")); - assertInstanceBinding(module, Settings.class, (s) -> s == settings); + public void testDynamicIndexSettingsRegistration() throws Exception { + try (FeatureFlagSetter f = FeatureFlagSetter.set(FeatureFlags.EXTENSIONS)) { + Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); + SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); + assertNotNull(module.getClusterSettings().get("some.custom.setting")); + // For unregistered setting the value is expected to be null + assertNull(module.getIndexScopedSettings().get("index.custom.setting2")); + assertInstanceBinding(module, Settings.class, (s) -> s == settings); - assertTrue(module.registerDynamicSetting(Setting.floatSetting("index.custom.setting2", 1.0f, Property.IndexScope))); - assertNotNull(module.getIndexScopedSettings().get("index.custom.setting2")); + assertTrue(module.registerDynamicSetting(Setting.floatSetting("index.custom.setting2", 1.0f, Property.IndexScope))); + 
assertNotNull(module.getIndexScopedSettings().get("index.custom.setting2")); - // verify if some.custom.setting still exists - assertNotNull(module.getClusterSettings().get("some.custom.setting")); + // verify if some.custom.setting still exists + assertNotNull(module.getClusterSettings().get("some.custom.setting")); - // verify exception is thrown when setting registration fails - expectThrows( - SettingsException.class, - () -> module.registerDynamicSetting(Setting.floatSetting("index.custom.setting2", 1.0f, Property.IndexScope)) - ); + // verify exception is thrown when setting registration fails + expectThrows( + SettingsException.class, + () -> module.registerDynamicSetting(Setting.floatSetting("index.custom.setting2", 1.0f, Property.IndexScope)) + ); + } } } diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index 05ede515e042c..ca16efdf11d7d 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -8,31 +8,23 @@ package org.opensearch.common.util; -import org.junit.BeforeClass; -import org.opensearch.common.SuppressForbidden; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; -import java.security.AccessController; -import java.security.PrivilegedAction; - public class FeatureFlagTests extends OpenSearchTestCase { - @SuppressForbidden(reason = "sets the feature flag") - @BeforeClass - public static void enableFeature() { - AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true")); - AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REMOTE_STORE, "true")); - AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.EXTENSIONS, "true")); - } + private final String FLAG_PREFIX = "opensearch.experimental.feature."; - public void testReplicationTypeFeatureFlag() { - String replicationTypeFlag = FeatureFlags.REPLICATION_TYPE; - assertNotNull(System.getProperty(replicationTypeFlag)); - assertTrue(FeatureFlags.isEnabled(replicationTypeFlag)); + public void testFeatureFlagSet() throws Exception { + final String testFlag = FLAG_PREFIX + "testFlag"; + try (FeatureFlagSetter f = FeatureFlagSetter.set(testFlag)) { + assertNotNull(System.getProperty(testFlag)); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } } public void testMissingFeatureFlag() { - String testFlag = "missingFeatureFlag"; + final String testFlag = FLAG_PREFIX + "testFlag"; assertNull(System.getProperty(testFlag)); assertFalse(FeatureFlags.isEnabled(testFlag)); } @@ -42,11 +34,4 @@ public void testNonBooleanFeatureFlag() { assertNotNull(System.getProperty(javaVersionProperty)); assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); } - - public void testRemoteStoreFeatureFlag() { - String remoteStoreFlag = FeatureFlags.REMOTE_STORE; - assertNotNull(System.getProperty(remoteStoreFlag)); - assertTrue(FeatureFlags.isEnabled(remoteStoreFlag)); - } - } diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index a8d4050329c9c..44cf3a38f01d1 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -61,7 +61,7 @@ import 
org.opensearch.common.settings.WriteableSetting.SettingType; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.util.FeatureFlagTests; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.env.Environment; @@ -76,6 +76,7 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.plugins.PluginInfo; import org.opensearch.rest.RestController; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; @@ -91,6 +92,7 @@ public class ExtensionsManagerTests extends OpenSearchTestCase { + private FeatureFlagSetter featureFlagSetter; private TransportService transportService; private RestController restController; private SettingsModule settingsModule; @@ -138,7 +140,7 @@ public class ExtensionsManagerTests extends OpenSearchTestCase { @Before public void setup() throws Exception { - FeatureFlagTests.enableFeature(); + featureFlagSetter = FeatureFlagSetter.set(FeatureFlags.EXTENSIONS); Settings settings = Settings.builder().put("cluster.name", "test").build(); transport = new MockNioTransport( settings, @@ -208,6 +210,7 @@ public void tearDown() throws Exception { transportService.close(); client.close(); ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + featureFlagSetter.close(); } public void testDiscover() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 34087c7fa8df9..957eb337f5c85 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.AbstractScopedSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -41,8 +42,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -57,6 +60,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; +import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; public class IndexSettingsTests extends OpenSearchTestCase { @@ -995,4 +999,46 @@ public void testSetRemoteTranslogRepositoryFailsWhenEmptyString() { ); assertEquals("Setting index.remote_store.translog.repository should be provided with non-empty repository ID", iae.getMessage()); } + + @SuppressForbidden(reason = "sets the SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY feature flag") + public void testExtendedCompatibilityVersionForRemoteSnapshot() throws Exception { + try 
(FeatureFlagSetter f = FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertTrue(settings.isRemoteSnapshot()); + assertEquals(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION, settings.getExtendedCompatibilitySnapshotVersion()); + } + } + + public void testExtendedCompatibilityVersionForNonRemoteSnapshot() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertFalse(settings.isRemoteSnapshot()); + assertEquals(Version.CURRENT.minimumIndexCompatibilityVersion(), settings.getExtendedCompatibilitySnapshotVersion()); + } + + public void testExtendedCompatibilityVersionWithoutFeatureFlag() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertTrue(settings.isRemoteSnapshot()); + assertEquals(Version.CURRENT.minimumIndexCompatibilityVersion(), settings.getExtendedCompatibilitySnapshotVersion()); + } } diff --git a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java index 7a3bd0ef6c302..2ce7c62cbdfbd 100644 --- a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java @@ -32,19 +32,30 @@ package org.opensearch.index.engine; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.tests.util.LuceneTestCase; +import org.apache.lucene.tests.util.TestUtil; import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.IndexSettingsModule; import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Path; import java.util.List; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; @@ -228,6 +239,49 @@ public void testReadOnly() throws IOException { } } + public void testReadOldIndices() throws Exception { + IOUtils.close(engine, store); + // The 
index has one document in it, so the checkpoint cannot be NO_OPS_PERFORMED + final AtomicLong globalCheckpoint = new AtomicLong(0); + final String pathToTestIndex = "/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip"; + Path tmp = createTempDir(); + TestUtil.unzip(getClass().getResourceAsStream(pathToTestIndex), tmp); + try (FeatureFlagSetter f = FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .build() + ); + try (Store store = createStore(newFSDirectory(tmp))) { + EngineConfig config = config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + try ( + ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null, new TranslogStats(), true, Function.identity(), true) + ) { + assertVisibleCount(readOnlyEngine, 1, false); + } + } + } + } + + public void testReadOldIndicesFailure() throws IOException { + IOUtils.close(engine, store); + // The index has one document in it, so the checkpoint cannot be NO_OPS_PERFORMED + final AtomicLong globalCheckpoint = new AtomicLong(0); + final String pathToTestIndex = "/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip"; + Path tmp = createTempDir(); + TestUtil.unzip(getClass().getResourceAsStream(pathToTestIndex), tmp); + try (Store store = createStore(newFSDirectory(tmp))) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + try { + new ReadOnlyEngine(config, null, new TranslogStats(), true, Function.identity(), true); + } catch (UncheckedIOException e) { + assertEquals(IndexFormatTooOldException.class, e.getCause().getClass()); + } + } + } + /** * Test that {@link ReadOnlyEngine#verifyEngineBeforeIndexClosing()} never fails * whatever the value of the global checkpoint to check is. 
diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java
index dc6cf4c187f61..7f5340096ab86 100644
--- a/server/src/test/java/org/opensearch/index/store/StoreTests.java
+++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java
@@ -67,15 +67,18 @@ import org.hamcrest.Matchers;
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.io.stream.InputStreamStreamInput;
 import org.opensearch.common.io.stream.OutputStreamStreamOutput;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.internal.io.IOUtils;
 import org.opensearch.env.ShardLock;
 import org.opensearch.index.Index;
+import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.engine.Engine;
 import org.opensearch.index.seqno.ReplicationTracker;
@@ -84,6 +87,7 @@ import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.indices.store.TransportNodesListShardStoreMetadata;
 import org.opensearch.test.DummyShardLock;
+import org.opensearch.test.FeatureFlagSetter;
 import org.opensearch.test.IndexSettingsModule;
 import org.opensearch.test.OpenSearchTestCase;
@@ -116,6 +120,7 @@ import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
+import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION;
 import static org.opensearch.test.VersionUtils.randomVersion;
 
 public class StoreTests extends OpenSearchTestCase {
@@ -1257,6 +1262,49 @@ public void testSegmentReplicationDiff() {
         assertTrue(diff.identical.isEmpty());
     }
 
+    @SuppressForbidden(reason = "sets the SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY feature flag")
+    public void testReadSegmentsFromOldIndices() throws Exception {
+        int expectedIndexCreatedVersionMajor = SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION.luceneVersion.major;
+        final String pathToTestIndex = "/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip";
+        Path tmp = createTempDir();
+        TestUtil.unzip(getClass().getResourceAsStream(pathToTestIndex), tmp);
+        final ShardId shardId = new ShardId("index", "_na_", 1);
+        Store store = null;
+
+        try (FeatureFlagSetter f = FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) {
+            IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(
+                "index",
+                Settings.builder()
+                    .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT)
+                    .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey())
+                    .build()
+            );
+            store = new Store(shardId, indexSettings, StoreTests.newMockFSDirectory(tmp), new DummyShardLock(shardId));
+            assertEquals(expectedIndexCreatedVersionMajor, store.readLastCommittedSegmentsInfo().getIndexCreatedVersionMajor());
+        } finally {
+            if (store != null) {
+                store.close();
+            }
+        }
+    }
+
+    public void testReadSegmentsFromOldIndicesFailure() throws IOException {
+        final String pathToTestIndex = "/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip";
+        final ShardId shardId = new ShardId("index", "_na_", 1);
+        Path tmp = createTempDir();
+        TestUtil.unzip(getClass().getResourceAsStream(pathToTestIndex), tmp);
+        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(
+            "index",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT)
+                .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey())
+                .build()
+        );
+        Store store = new Store(shardId, indexSettings, StoreTests.newMockFSDirectory(tmp), new DummyShardLock(shardId));
+        assertThrows(IndexFormatTooOldException.class, store::readLastCommittedSegmentsInfo);
+        store.close();
+    }
+
     private void commitRandomDocs(Store store) throws IOException {
         IndexWriter writer = indexRandomDocs(store);
         writer.commit();
diff --git a/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java b/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java
new file mode 100644
index 0000000000000..fda330ba7a7f3
--- /dev/null
+++ b/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.repositories;
+
+import org.junit.Before;
+import org.opensearch.snapshots.SnapshotId;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class IndexMetadataGenerationsTests extends OpenSearchTestCase {
+
+    private final int MAX_TEST_INDICES = 10;
+    private final String SNAPSHOT = "snapshot";
+    private final String INDEX_PREFIX = "index-";
+    private final String BLOB_ID_PREFIX = "blob-";
+    private IndexMetaDataGenerations indexMetaDataGenerations;
+
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        final int numIndices = randomIntBetween(1, MAX_TEST_INDICES);
+        Map<IndexId, String> indexMap = createIndexMetadataMap(1, numIndices);
+        Map<String, String> identifierMap = createIdentifierMapFromIndexMetadata(indexMap, BLOB_ID_PREFIX);
+        Map<SnapshotId, Map<IndexId, String>> lookupMap = Collections.singletonMap(new SnapshotId(SNAPSHOT, SNAPSHOT), indexMap);
+        indexMetaDataGenerations = new IndexMetaDataGenerations(lookupMap, identifierMap);
+    }
+
+    public void testEmpty() {
+        assertTrue(IndexMetaDataGenerations.EMPTY.isEmpty());
+        assertNull(IndexMetaDataGenerations.EMPTY.getIndexMetaBlobId("test"));
+    }
+
+    public void testBaseCase() {
+        assertFalse(indexMetaDataGenerations.isEmpty());
+        assertEquals(BLOB_ID_PREFIX + 1, indexMetaDataGenerations.getIndexMetaBlobId(String.valueOf(1)));
+    }
+
+    public void testIndexMetaBlobId() {
+        SnapshotId snapshotId = new SnapshotId(SNAPSHOT, SNAPSHOT);
+        IndexId indexId = new IndexId(INDEX_PREFIX + 1, INDEX_PREFIX + 1);
+        assertEquals(BLOB_ID_PREFIX + 1, indexMetaDataGenerations.indexMetaBlobId(snapshotId, indexId));
+    }
+
+    public void testIndexMetaBlobIdFallback() {
+        SnapshotId snapshotId = new SnapshotId(SNAPSHOT, SNAPSHOT);
+        IndexId indexId = new IndexId("missingIndex", "missingIndex");
+        assertEquals(SNAPSHOT, indexMetaDataGenerations.indexMetaBlobId(snapshotId, indexId));
+
+        final String randomString = randomAlphaOfLength(8);
+        snapshotId = new SnapshotId(randomString, randomString);
+        assertEquals(randomString, indexMetaDataGenerations.indexMetaBlobId(snapshotId, indexId));
+    }
+
+    public void testWithAddedSnapshot() {
+        // Construct a new snapshot
+        SnapshotId newSnapshot = new SnapshotId("newSnapshot", "newSnapshot");
+        final String newIndexMetadataPrefix = "newIndexMetadata-";
+        final String newBlobIdPrefix = "newBlob-";
+        final int numIndices = randomIntBetween(2, MAX_TEST_INDICES);
+        Map<IndexId, String> newLookupMap = createIndexMetadataMap(2, numIndices);
+        Map<String, String> identifierMap = createIdentifierMapFromIndexMetadata(newLookupMap, newBlobIdPrefix);
+
+        // Add the snapshot and verify that values have been updated as expected
+        IndexMetaDataGenerations updated = indexMetaDataGenerations.withAddedSnapshot(newSnapshot, newLookupMap, identifierMap);
+        assertEquals(newBlobIdPrefix + 2, updated.getIndexMetaBlobId(String.valueOf(2)));
+        assertEquals(newBlobIdPrefix + 2, updated.indexMetaBlobId(newSnapshot, new IndexId(INDEX_PREFIX + 2, INDEX_PREFIX + 2)));
+        // The first index should remain unchanged
+        assertEquals(BLOB_ID_PREFIX + 1, updated.getIndexMetaBlobId(String.valueOf(1)));
+    }
+
+    public void testWithRemovedSnapshot() {
+        Set<SnapshotId> snapshotToRemove = Collections.singleton(new SnapshotId(SNAPSHOT, SNAPSHOT));
+        assertEquals(IndexMetaDataGenerations.EMPTY, indexMetaDataGenerations.withRemovedSnapshots(snapshotToRemove));
+    }
+
+    private Map<IndexId, String> createIndexMetadataMap(int indexCountLowerBound, int numIndices) {
+        final int indexCountUpperBound = indexCountLowerBound + numIndices;
+        Map<IndexId, String> map = new HashMap<>();
+        for (int i = indexCountLowerBound; i <= indexCountUpperBound; i++) {
+            map.put(new IndexId(INDEX_PREFIX + i, INDEX_PREFIX + i), String.valueOf(i));
+        }
+        return map;
+    }
+
+    private Map<String, String> createIdentifierMapFromIndexMetadata(Map<IndexId, String> indexMetadataMap, String blobIdPrefix) {
+        return indexMetadataMap.values().stream().collect(Collectors.toMap(k -> k, v -> blobIdPrefix + v));
+    }
+}
diff --git a/server/src/test/resources/indices/bwc/es-6.3.0/README.md b/server/src/test/resources/indices/bwc/es-6.3.0/README.md
new file mode 100644
index 0000000000000..a9f969475aaad
--- /dev/null
+++ b/server/src/test/resources/indices/bwc/es-6.3.0/README.md
@@ -0,0 +1,57 @@
+# README for _testIndex-es-6.3.0.zip_
+
+This zip file holds a Lucene index created using Elasticsearch 6.3.0.
+It was created by running the commands listed below against a single-node cluster,
+then compressing the contents of the shard's Lucene index directory, i.e.
+the files under `/data/nodes/0/indices/<index-uuid>/0/index`.
+The index contains one document.
+
+## Commands
+
+```
+curl -X PUT -H 'Content-Type: application/json' 'localhost:9200/testindex?pretty' -d'
+{
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 0
+  },
+  "mappings": {
+    "testData": {
+      "properties": {
+        "id": { "type": "keyword" },
+        "isTestData": { "type": "boolean" },
+        "testNum": { "type": "short" },
+        "testRange": { "type": "integer_range" },
+        "testMessage": {
+          "type": "text",
+          "fields": {
+            "length": {
+              "type": "token_count",
+              "analyzer": "standard"
+            }
+          }
+        },
+        "testBlob": { "type": "binary", "index": false },
+        "testDate": { "type": "date" },
+        "testLocation": { "type": "geo_point" }
+      }
+    }
+  }
+}'
+
+curl -X POST "localhost:9200/testindex/testData/?pretty" -H 'Content-Type: application/json' -d'
+{
+  "id": "testData1",
+  "isTestData": true,
+  "testNum": 99,
+  "testRange": {
+    "gte": 0,
+    "lte": 100
+  },
+  "testMessage": "The OpenSearch Project",
+  "testBlob": "VGhlIE9wZW5TZWFyY2ggUHJvamVjdA==",
+  "testDate": "1970-01-02",
+  "testLocation": "48.553532,-113.022881"
+}
+'
+```
diff --git a/server/src/test/resources/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip b/server/src/test/resources/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip
new file mode 100644
index 0000000000000..db86a76153b25
Binary files /dev/null and b/server/src/test/resources/indices/bwc/es-6.3.0/testIndex-es-6.3.0.zip differ
diff --git a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java
new file mode 100644
index 0000000000000..26e884e707964
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test;
+
+import org.opensearch.common.SuppressForbidden;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+/**
+ * Helper class that wraps the lifecycle of setting and finally clearing a
+ * {@link org.opensearch.common.util.FeatureFlags} flag in an {@link AutoCloseable}.
+ */
+public class FeatureFlagSetter implements AutoCloseable {
+
+    private final String flag;
+
+    private FeatureFlagSetter(String flag) {
+        this.flag = flag;
+    }
+
+    @SuppressForbidden(reason = "Enables setting of feature flags")
+    public static final FeatureFlagSetter set(String flag) {
+        AccessController.doPrivileged((PrivilegedAction<String>) () -> System.setProperty(flag, "true"));
+        return new FeatureFlagSetter(flag);
+    }
+
+    @SuppressForbidden(reason = "Clears the set feature flag on close")
+    @Override
+    public void close() throws Exception {
+        AccessController.doPrivileged((PrivilegedAction<String>) () -> System.clearProperty(this.flag));
+    }
+}
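Note for reviewers: the test changes above all use the new `FeatureFlagSetter` in the same way, so a minimal usage sketch is included here for reference. The test class name below is hypothetical, and the assertion assumes `org.opensearch.common.util.FeatureFlags.isEnabled(String)` as the way a flag is normally consumed; only the `FeatureFlagSetter` API itself is taken from this change.

```java
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.test.FeatureFlagSetter;
import org.opensearch.test.OpenSearchTestCase;

// Hypothetical test class, shown only to illustrate the intended usage pattern.
public class FeatureFlagGatedTests extends OpenSearchTestCase {

    public void testWithFeatureFlagEnabled() throws Exception {
        // set(...) writes the flag's system property as "true"; close() clears it again,
        // so the flag cannot leak into tests that run afterwards.
        try (FeatureFlagSetter f = FeatureFlagSetter.set(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) {
            assertTrue(FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY));
            // ... exercise code that is gated behind the feature flag ...
        }
        // Outside the try-with-resources block the property has been cleared.
    }
}
```

The try-with-resources shape is the reason the helper implements `AutoCloseable`: the flag is cleared even if the test body throws, which keeps feature-flag state from bleeding between tests in the same JVM.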