From 8aa3185e18bc725c1aa74ee55dcdf3c0cdc512e3 Mon Sep 17 00:00:00 2001
From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com>
Date: Wed, 18 Dec 2024 11:42:21 +0530
Subject: [PATCH 01/61] Change Remote state read thread pool to Fixed type
 (#16850)

* Change Remote state read thread pool to Fixed type

Signed-off-by: Sooraj Sinha <soosinha@amazon.com>
---
 .../TransportClusterManagerNodeAction.java         |  9 ++++++++-
 .../coordination/PublicationTransportHandler.java  |  2 +-
 .../gateway/remote/RemoteClusterStateService.java  | 14 ++++++++++++++
 .../java/org/opensearch/threadpool/ThreadPool.java |  4 ++--
 .../remote/RemoteClusterStateServiceTests.java     |  8 ++++++++
 .../threadpool/ScalingThreadPoolTests.java         |  1 -
 6 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java
index 819e09312a0df..558b7370749d5 100644
--- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java
@@ -430,6 +430,13 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes
 
             if (remoteClusterStateService != null && termVersionResponse.isStatePresentInRemote()) {
                 try {
+                    logger.info(
+                        () -> new ParameterizedMessage(
+                            "Term version checker downloading full cluster state for term {}, version {}",
+                            termVersion.getTerm(),
+                            termVersion.getVersion()
+                        )
+                    );
                     ClusterStateTermVersion clusterStateTermVersion = termVersionResponse.getClusterStateTermVersion();
                     Optional<ClusterMetadataManifest> clusterMetadataManifest = remoteClusterStateService
                         .getClusterMetadataManifestByTermVersion(
@@ -454,7 +461,7 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes
                         return clusterStateFromRemote;
                     }
                 } catch (Exception e) {
-                    logger.trace("Error while fetching from remote cluster state", e);
+                    logger.error("Error while fetching from remote cluster state", e);
                 }
             }
             return null;
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
index 7275d72f2db9f..4ad5b80038048 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
@@ -258,7 +258,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest
             }
 
             if (applyFullState == true) {
-                logger.debug(
+                logger.info(
                     () -> new ParameterizedMessage(
                         "Downloading full cluster state for term {}, version {}, stateUUID {}",
                         manifest.getClusterTerm(),
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
index e7eac0ae67714..778ab3e56cf76 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
@@ -1494,8 +1494,22 @@ public ClusterState getClusterStateForManifest(
         try {
             ClusterState stateFromCache = remoteClusterStateCache.getState(clusterName, manifest);
             if (stateFromCache != null) {
+                logger.trace(
+                    () -> new ParameterizedMessage(
+                        "Found cluster state in cache for term {} and version {}",
+                        manifest.getClusterTerm(),
+                        manifest.getStateVersion()
+                    )
+                );
                 return stateFromCache;
             }
+            logger.info(
+                () -> new ParameterizedMessage(
+                    "Cluster state not found in cache for term {} and version {}",
+                    manifest.getClusterTerm(),
+                    manifest.getStateVersion()
+                )
+            );
 
             final ClusterState clusterState;
             final long startTimeNanos = relativeTimeNanosSupplier.getAsLong();
diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
index 269a4c87dfb72..59d3b110aeca8 100644
--- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
@@ -198,7 +198,7 @@ public static ThreadPoolType fromType(String type) {
         map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING);
         map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING);
         map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING);
-        map.put(Names.REMOTE_STATE_READ, ThreadPoolType.SCALING);
+        map.put(Names.REMOTE_STATE_READ, ThreadPoolType.FIXED);
         map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE);
         map.put(Names.REMOTE_STATE_CHECKSUM, ThreadPoolType.FIXED);
         THREAD_POOL_TYPES = Collections.unmodifiableMap(map);
@@ -306,7 +306,7 @@ public ThreadPool(
         );
         builders.put(
             Names.REMOTE_STATE_READ,
-            new ScalingExecutorBuilder(Names.REMOTE_STATE_READ, 1, boundedBy(4 * allocatedProcessors, 4, 32), TimeValue.timeValueMinutes(5))
+            new FixedExecutorBuilder(settings, Names.REMOTE_STATE_READ, boundedBy(4 * allocatedProcessors, 4, 32), 120000)
         );
         builders.put(
             Names.INDEX_SEARCHER,
diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java
index be07aa0d05e9f..e3684178a18ea 100644
--- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java
+++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java
@@ -2354,6 +2354,14 @@ public void testReadLatestClusterStateFromCache() throws IOException {
             .getState(clusterState.getClusterName().value(), expectedManifest);
         assertEquals(stateFromCache.getMetadata(), state.getMetadata());
 
+        ClusterState stateFromCache2 = remoteClusterStateService.getClusterStateForManifest(
+            clusterState.getClusterName().value(),
+            expectedManifest,
+            "nodeA",
+            true
+        );
+        assertEquals(stateFromCache2.getMetadata(), state.getMetadata());
+
         final ClusterMetadataManifest notExistMetadata = ClusterMetadataManifest.builder()
             .indices(List.of())
             .clusterTerm(1L)
diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
index b4726bab50198..23c21648b1263 100644
--- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
+++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
@@ -156,7 +156,6 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso
         sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors);
         sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors);
         sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors);
-        sizes.put(ThreadPool.Names.REMOTE_STATE_READ, n -> ThreadPool.boundedBy(4 * n, 4, 32));
         return sizes.get(threadPoolName).apply(numberOfProcessors);
     }
 

From b5f651f648d955b2e943fe3a17a95e8fb33a3980 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Wed, 18 Dec 2024 09:20:59 -0500
Subject: [PATCH 02/61] [Backport] [2.18] Update Apache Lucene to 9.12.1
 (#16846) (#16870) (#16877)

* Update Apache Lucene to 9.12.1 (#16846)

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
(cherry picked from commit bc4f44bd4d2a0bec6d6ded797e63ba99df6197a6)

* Added bwc version 1.3.21 (#16845)

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
Co-authored-by: opensearch-ci-bot <83309141+opensearch-ci-bot@users.noreply.github.com>

---------

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
Co-authored-by: opensearch-ci-bot <83309141+opensearch-ci-bot@users.noreply.github.com>
(cherry picked from commit 97f6d84d306521947d621e93085f340e5e666c65)
---
 libs/core/src/main/java/org/opensearch/Version.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 62c9cb36727b2..dd804fcc6db70 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -112,8 +112,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_17_1 = new Version(2170199, org.apache.lucene.util.Version.LUCENE_9_11_1);
     public static final Version V_2_17_2 = new Version(2170299, org.apache.lucene.util.Version.LUCENE_9_11_1);
     public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0);
-    public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_0);
-    public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_0);
+    public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1);
+    public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_1);
     public static final Version CURRENT = V_3_0_0;
 

From e6d71d290e56e93920f8b8efa5ddbf885699607e Mon Sep 17 00:00:00 2001
From: Bharathwaj G <bharath78910@gmail.com>
Date: Thu, 19 Dec 2024 17:58:09 +0530
Subject: [PATCH 03/61] Changes to support IP field in star tree indexing
 (#16641)

* Changes to support IP
---------

Signed-off-by: bharath-techie <bharath78910@gmail.com>
---
 .../index/mapper/StarTreeMapperIT.java        | 22 +++--
 .../Composite912DocValuesWriter.java          | 30 +++++--
 .../datacube/DimensionFactory.java            | 15 ++--
 .../datacube/DimensionType.java               | 12 ++-
 .../compositeindex/datacube/IpDimension.java  | 82 +++++++++++++++++++
 ...rdDimension.java => OrdinalDimension.java} | 10 +--
 .../index/mapper/IpFieldMapper.java           |  7 ++
 .../index/mapper/KeywordFieldMapper.java      |  2 +-
 .../StarTreeKeywordDocValuesFormatTests.java  | 37 +++++++--
 .../StarTreeBuilderFlushFlowTests.java        | 11 +--
 .../StarTreeBuilderMergeFlowTests.java        |  2 +-
 .../builder/StarTreeBuilderTestCase.java      |  9 +-
 .../index/mapper/StarTreeMapperTests.java     |  8 +-
 13 files changed, 204 insertions(+), 43 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java
 rename server/src/main/java/org/opensearch/index/compositeindex/datacube/{KeywordDimension.java => OrdinalDimension.java} (87%)

diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
index c91c4d7bbb63b..3f9053576329c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
@@ -56,7 +56,7 @@ public class StarTreeMapperIT extends OpenSearchIntegTestCase {
         .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB))
         .build();
 
-    private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean ipdim) {
+    private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean wildcard) {
         try {
             return jsonBuilder().startObject()
                 .startObject("composite")
@@ -68,7 +68,7 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool
                 .endObject()
                 .startArray("ordered_dimensions")
                 .startObject()
-                .field("name", getDim(invalidDim, ipdim))
+                .field("name", getDim(invalidDim, wildcard))
                 .endObject()
                 .startObject()
                 .field("name", "keyword_dv")
@@ -102,8 +102,16 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool
                 .field("type", "keyword")
                 .field("doc_values", false)
                 .endObject()
+                .startObject("ip_no_dv")
+                .field("type", "ip")
+                .field("doc_values", false)
+                .endObject()
                 .startObject("ip")
                 .field("type", "ip")
+                .field("doc_values", true)
+                .endObject()
+                .startObject("wildcard")
+                .field("type", "wildcard")
                 .field("doc_values", false)
                 .endObject()
                 .endObject()
@@ -362,11 +370,11 @@ private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, bo
         return mapping;
     }
 
-    private static String getDim(boolean hasDocValues, boolean isKeyword) {
+    private static String getDim(boolean hasDocValues, boolean isWildCard) {
         if (hasDocValues) {
-            return random().nextBoolean() ? "numeric" : "keyword";
-        } else if (isKeyword) {
-            return "ip";
+            return random().nextBoolean() ? "numeric" : random().nextBoolean() ? "keyword" : "ip_no_dv";
+        } else if (isWildCard) {
+            return "wildcard";
         }
         return "numeric_dv";
     }
@@ -748,7 +756,7 @@ public void testUnsupportedDim() {
             () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, true)).get()
         );
         assertEquals(
-            "Failed to parse mapping [_doc]: unsupported field type associated with dimension [ip] as part of star tree field [startree-1]",
+            "Failed to parse mapping [_doc]: unsupported field type associated with dimension [wildcard] as part of star tree field [startree-1]",
             ex.getMessage()
         );
     }
diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java
index 904d6a7aba5c6..ca52d8bf4bca0 100644
--- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java
+++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java
@@ -33,9 +33,11 @@
 import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder;
 import org.opensearch.index.compositeindex.datacube.startree.index.CompositeIndexValues;
 import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues;
+import org.opensearch.index.fielddata.IndexNumericFieldData;
+import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData;
 import org.opensearch.index.mapper.CompositeMappedFieldType;
 import org.opensearch.index.mapper.DocCountFieldMapper;
-import org.opensearch.index.mapper.KeywordFieldMapper;
+import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.MapperService;
 
 import java.io.IOException;
@@ -44,6 +46,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -262,22 +265,38 @@ public SortedSetDocValues getSortedSet(FieldInfo field) {
                         return DocValues.emptySortedSet();
                     }
                 });
-            }
-            // TODO : change this logic to evaluate for sortedNumericField specifically
-            else {
+            } else if (isSortedNumericField(compositeField)) {
                 fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() {
                     @Override
                     public SortedNumericDocValues getSortedNumeric(FieldInfo field) {
                         return DocValues.emptySortedNumeric();
                     }
                 });
+            } else {
+                throw new IllegalStateException(
+                    String.format(Locale.ROOT, "Unsupported DocValues field associated with the composite field : %s", compositeField)
+                );
             }
         }
         compositeFieldSet.remove(compositeField);
     }
 
     private boolean isSortedSetField(String field) {
-        return mapperService.fieldType(field) instanceof KeywordFieldMapper.KeywordFieldType;
+        MappedFieldType ft = mapperService.fieldType(field);
+        assert ft.isAggregatable();
+        return ft.fielddataBuilder(
+            "",
+            () -> { throw new UnsupportedOperationException("SearchLookup not available"); }
+        ) instanceof SortedSetOrdinalsIndexFieldData.Builder;
+    }
+
+    private boolean isSortedNumericField(String field) {
+        MappedFieldType ft = mapperService.fieldType(field);
+        assert ft.isAggregatable();
+        return ft.fielddataBuilder(
+            "",
+            () -> { throw new UnsupportedOperationException("SearchLookup not available"); }
+        ) instanceof IndexNumericFieldData.Builder;
     }
 
     @Override
@@ -370,5 +389,4 @@ private static SegmentWriteState getSegmentWriteState(SegmentWriteState segmentW
             segmentWriteState.segmentSuffix
         );
     }
-
 }
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java
index e834706e2fa9d..b1e78d78d3ad2 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java
@@ -24,7 +24,8 @@
 import java.util.stream.Collectors;
 
 import static org.opensearch.index.compositeindex.datacube.DateDimension.CALENDAR_INTERVALS;
-import static org.opensearch.index.compositeindex.datacube.KeywordDimension.KEYWORD;
+import static org.opensearch.index.compositeindex.datacube.IpDimension.IP;
+import static org.opensearch.index.compositeindex.datacube.OrdinalDimension.ORDINAL;
 
 /**
  * Dimension factory class mainly used to parse and create dimension from the mappings
@@ -44,8 +45,10 @@ public static Dimension parseAndCreateDimension(
                 return parseAndCreateDateDimension(name, dimensionMap, c);
             case NumericDimension.NUMERIC:
                 return new NumericDimension(name);
-            case KEYWORD:
-                return new KeywordDimension(name);
+            case ORDINAL:
+                return new OrdinalDimension(name);
+            case IP:
+                return new IpDimension(name);
             default:
                 throw new IllegalArgumentException(
                     String.format(Locale.ROOT, "unsupported field type associated with dimension [%s] as part of star tree field", name)
@@ -69,8 +72,10 @@ public static Dimension parseAndCreateDimension(
                 return parseAndCreateDateDimension(name, dimensionMap, c);
             case NUMERIC:
                 return new NumericDimension(name);
-            case KEYWORD:
-                return new KeywordDimension(name);
+            case ORDINAL:
+                return new OrdinalDimension(name);
+            case IP:
+                return new IpDimension(name);
             default:
                 throw new IllegalArgumentException(
                     String.format(Locale.ROOT, "unsupported field type associated with star tree dimension [%s]", name)
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java
index d327f8ca1fa1e..f7911e72f36fc 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java
@@ -30,8 +30,14 @@ public enum DimensionType {
     DATE,
 
     /**
-     * Represents a keyword dimension type.
-     * This is used for dimensions that contain keyword ordinals.
+     * Represents dimension types which uses ordinals.
+     * This is used for dimensions that contain sortedSet ordinals.
      */
-    KEYWORD
+    ORDINAL,
+
+    /**
+     * Represents an IP dimension type.
+     * This is used for dimensions that contain IP ordinals.
+     */
+    IP
 }
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java
new file mode 100644
index 0000000000000..9c3682bd2e0ea
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java
@@ -0,0 +1,82 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.compositeindex.datacube;
+
+import org.apache.lucene.index.DocValuesType;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.mapper.CompositeDataCubeFieldType;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Consumer;
+
+/**
+ * Composite index keyword dimension class
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public class IpDimension implements Dimension {
+    public static final String IP = "ip";
+    private final String field;
+
+    public IpDimension(String field) {
+        this.field = field;
+    }
+
+    @Override
+    public String getField() {
+        return field;
+    }
+
+    @Override
+    public int getNumSubDimensions() {
+        return 1;
+    }
+
+    @Override
+    public void setDimensionValues(Long value, Consumer<Long> dimSetter) {
+        // This will set the keyword dimension value's ordinal
+        dimSetter.accept(value);
+    }
+
+    @Override
+    public List<String> getSubDimensionNames() {
+        return List.of(field);
+    }
+
+    @Override
+    public DocValuesType getDocValuesType() {
+        return DocValuesType.SORTED_SET;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(CompositeDataCubeFieldType.NAME, field);
+        builder.field(CompositeDataCubeFieldType.TYPE, IP);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        IpDimension dimension = (IpDimension) o;
+        return Objects.equals(field, dimension.getField());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(field);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java
similarity index 87%
rename from server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java
rename to server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java
index 58e248fd548d6..9cb4cd78bdaac 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java
@@ -24,11 +24,11 @@
  * @opensearch.experimental
  */
 @ExperimentalApi
-public class KeywordDimension implements Dimension {
-    public static final String KEYWORD = "keyword";
+public class OrdinalDimension implements Dimension {
+    public static final String ORDINAL = "ordinal";
     private final String field;
 
-    public KeywordDimension(String field) {
+    public OrdinalDimension(String field) {
         this.field = field;
     }
 
@@ -62,7 +62,7 @@ public DocValuesType getDocValuesType() {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(CompositeDataCubeFieldType.NAME, field);
-        builder.field(CompositeDataCubeFieldType.TYPE, KEYWORD);
+        builder.field(CompositeDataCubeFieldType.TYPE, ORDINAL);
         builder.endObject();
         return builder;
     }
@@ -71,7 +71,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
-        KeywordDimension dimension = (KeywordDimension) o;
+        OrdinalDimension dimension = (OrdinalDimension) o;
         return Objects.equals(field, dimension.getField());
     }
 
diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java
index e23a48f94f450..1283aa302c111 100644
--- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java
@@ -52,6 +52,7 @@
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.network.InetAddresses;
 import org.opensearch.common.network.NetworkAddress;
+import org.opensearch.index.compositeindex.datacube.DimensionType;
 import org.opensearch.index.fielddata.IndexFieldData;
 import org.opensearch.index.fielddata.ScriptDocValues;
 import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData;
@@ -68,6 +69,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.BiFunction;
 import java.util.function.Supplier;
 
@@ -161,6 +163,11 @@ public IpFieldMapper build(BuilderContext context) {
             );
         }
 
+        @Override
+        public Optional<DimensionType> getSupportedDataCubeDimensionType() {
+            return Optional.of(DimensionType.IP);
+        }
+
     }
 
     public static final TypeParser PARSER = new TypeParser((n, c) -> {
diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
index df14a5811f6a0..90e43c818e137 100644
--- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
@@ -259,7 +259,7 @@ public KeywordFieldMapper build(BuilderContext context) {
 
         @Override
         public Optional<DimensionType> getSupportedDataCubeDimensionType() {
-            return Optional.of(DimensionType.KEYWORD);
+            return Optional.of(DimensionType.ORDINAL);
         }
     }
 
diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java
index 402ed1dbee98a..5603fe4e30f9f 100644
--- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java
+++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java
@@ -10,6 +10,7 @@
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
@@ -25,6 +26,7 @@
 import org.apache.lucene.tests.util.TestUtil;
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.network.InetAddresses;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.codec.composite.CompositeIndexFieldInfo;
 import org.opensearch.index.codec.composite.CompositeIndexReader;
@@ -36,6 +38,8 @@
 import org.opensearch.index.mapper.NumberFieldMapper;
 
 import java.io.IOException;
+import java.net.InetAddress;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -65,12 +69,15 @@ public void testStarTreeKeywordDocValues() throws IOException {
         doc.add(new SortedNumericDocValuesField("sndv", 1));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10")))));
         iw.addDocument(doc);
         doc = new Document();
         doc.add(new StringField("_id", "2", Field.Store.NO));
         doc.add(new SortedNumericDocValuesField("sndv", 1));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11")))));
+
         iw.addDocument(doc);
         iw.flush();
         iw.deleteDocuments(new Term("_id", "2"));
@@ -80,12 +87,14 @@ public void testStarTreeKeywordDocValues() throws IOException {
         doc.add(new SortedNumericDocValuesField("sndv", 2));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10")))));
         iw.addDocument(doc);
         doc = new Document();
         doc.add(new StringField("_id", "4", Field.Store.NO));
         doc.add(new SortedNumericDocValuesField("sndv", 2));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11")))));
         iw.addDocument(doc);
         iw.flush();
         iw.deleteDocuments(new Term("_id", "4"));
@@ -166,6 +175,9 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException {
 
                 doc.add(new SortedSetDocValuesField("keyword2", new BytesRef(keyword2Value)));
                 map.put(keyword1Value + "-" + keyword2Value, sndvValue + map.getOrDefault(keyword1Value + "-" + keyword2Value, 0));
+                doc.add(
+                    new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10." + i))))
+                );
                 iw.addDocument(doc);
                 documents.put(id, doc);
             }
@@ -221,9 +233,7 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException {
                 SortedSetStarTreeValuesIterator k1 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator(
                     "keyword1"
                 );
-                SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator(
-                    "keyword2"
-                );
+                SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator("ip1");
                 for (StarTreeDocument starDoc : actualStarTreeDocuments) {
                     String keyword1 = null;
                     if (starDoc.dimensions[0] != null) {
@@ -232,7 +242,11 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException {
 
                     String keyword2 = null;
                     if (starDoc.dimensions[1] != null) {
-                        keyword2 = k2.lookupOrd(starDoc.dimensions[1]).utf8ToString();
+                        BytesRef encoded = k2.lookupOrd(starDoc.dimensions[1]);
+                        InetAddress address = InetAddressPoint.decode(
+                            Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length)
+                        );
+                        keyword2 = InetAddresses.toAddrString(address);
                     }
                     double metric = (double) starDoc.metrics[0];
                     if (map.containsKey(keyword1 + "-" + keyword2)) {
@@ -254,21 +268,28 @@ public void testStarKeywordDocValuesWithMissingDocs() throws IOException {
         Document doc = new Document();
         doc.add(new SortedNumericDocValuesField("sndv", 1));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10")))));
+
         iw.addDocument(doc);
         doc = new Document();
         doc.add(new SortedNumericDocValuesField("sndv", 1));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11")))));
         iw.addDocument(doc);
         iw.forceMerge(1);
         doc = new Document();
         doc.add(new SortedNumericDocValuesField("sndv", 2));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10")))));
+
         iw.addDocument(doc);
         doc = new Document();
         doc.add(new SortedNumericDocValuesField("sndv", 2));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11")))));
+
         iw.addDocument(doc);
         iw.forceMerge(1);
         iw.close();
@@ -340,11 +361,14 @@ public void testStarKeywordDocValuesWithMissingDocsInSegment() throws IOExceptio
         doc.add(new SortedNumericDocValuesField("sndv", 2));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10")))));
         iw.addDocument(doc);
         doc = new Document();
         doc.add(new SortedNumericDocValuesField("sndv", 2));
         doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11")));
         doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22")));
+        doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11")))));
+
         iw.addDocument(doc);
         iw.forceMerge(1);
         iw.close();
@@ -538,7 +562,7 @@ protected XContentBuilder getMapping() throws IOException {
             b.field("name", "keyword1");
             b.endObject();
             b.startObject();
-            b.field("name", "keyword2");
+            b.field("name", "ip1");
             b.endObject();
             b.endArray();
             b.startArray("metrics");
@@ -566,6 +590,9 @@ protected XContentBuilder getMapping() throws IOException {
             b.startObject("keyword2");
             b.field("type", "keyword");
             b.endObject();
+            b.startObject("ip1");
+            b.field("type", "ip");
+            b.endObject();
             b.endObject();
         });
     }
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java
index 440268f1f803c..70cc20fe4a9f6 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java
@@ -20,10 +20,11 @@
 import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory;
 import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat;
 import org.opensearch.index.compositeindex.datacube.Dimension;
-import org.opensearch.index.compositeindex.datacube.KeywordDimension;
+import org.opensearch.index.compositeindex.datacube.IpDimension;
 import org.opensearch.index.compositeindex.datacube.Metric;
 import org.opensearch.index.compositeindex.datacube.MetricStat;
 import org.opensearch.index.compositeindex.datacube.NumericDimension;
+import org.opensearch.index.compositeindex.datacube.OrdinalDimension;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeField;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration;
@@ -426,7 +427,7 @@ public void testFlushFlowForKeywords() throws IOException {
         );
         List<Integer> metricsWithField = List.of(0, 1, 2, 3, 4, 5);
 
-        compositeField = getStarTreeFieldWithKeywordField();
+        compositeField = getStarTreeFieldWithKeywordField(random().nextBoolean());
         SortedSetStarTreeValuesIterator d1sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList, docsWithField));
         SortedSetStarTreeValuesIterator d2sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList2, docsWithField2));
         SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator(
@@ -531,9 +532,9 @@ private StarTreeField getStarTreeFieldWithMultipleMetrics() {
         return new StarTreeField("sf", dims, metrics, c);
     }
 
-    private StarTreeField getStarTreeFieldWithKeywordField() {
-        Dimension d1 = new KeywordDimension("field1");
-        Dimension d2 = new KeywordDimension("field3");
+    private StarTreeField getStarTreeFieldWithKeywordField(boolean isIp) {
+        Dimension d1 = isIp ? new IpDimension("field1") : new OrdinalDimension("field1");
+        Dimension d2 = isIp ? new IpDimension("field3") : new OrdinalDimension("field3");
         Metric m1 = new Metric("field2", List.of(MetricStat.SUM));
         Metric m2 = new Metric("field2", List.of(MetricStat.VALUE_COUNT));
         Metric m3 = new Metric("field2", List.of(MetricStat.AVG));
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java
index be16961e781db..74ecff04076b1 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java
@@ -1831,7 +1831,7 @@ public void testMergeFlowWithKeywords() throws IOException {
         List<Long> metricsList2 = List.of(0L, 1L, 2L, 3L, 4L);
         List<Integer> metricsWithField2 = List.of(0, 1, 2, 3, 4);
 
-        compositeField = getStarTreeFieldWithKeywords();
+        compositeField = getStarTreeFieldWithKeywords(random().nextBoolean());
         StarTreeValues starTreeValues = getStarTreeValuesWithKeywords(
             getSortedSetMock(dimList, docsWithField),
             getSortedSetMock(dimList2, docsWithField2),
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java
index 9c9beaea4f52c..cca987b6f9b16 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java
@@ -32,10 +32,11 @@
 import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit;
 import org.opensearch.index.compositeindex.datacube.DateDimension;
 import org.opensearch.index.compositeindex.datacube.Dimension;
-import org.opensearch.index.compositeindex.datacube.KeywordDimension;
+import org.opensearch.index.compositeindex.datacube.IpDimension;
 import org.opensearch.index.compositeindex.datacube.Metric;
 import org.opensearch.index.compositeindex.datacube.MetricStat;
 import org.opensearch.index.compositeindex.datacube.NumericDimension;
+import org.opensearch.index.compositeindex.datacube.OrdinalDimension;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeField;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration;
@@ -352,9 +353,9 @@ protected StarTreeMetadata getStarTreeMetadata(
         );
     }
 
-    protected StarTreeField getStarTreeFieldWithKeywords() {
-        Dimension d1 = new KeywordDimension("field1");
-        Dimension d2 = new KeywordDimension("field3");
+    protected StarTreeField getStarTreeFieldWithKeywords(boolean ip) {
+        Dimension d1 = ip ? new IpDimension("field1") : new OrdinalDimension("field1");
+        Dimension d2 = ip ? new IpDimension("field3") : new OrdinalDimension("field3");
         Metric m1 = new Metric("field2", List.of(MetricStat.VALUE_COUNT, MetricStat.SUM));
         List<Dimension> dims = List.of(d1, d2);
         List<Metric> metrics = List.of(m1);
diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
index 8ec34b3eb660c..333cdbcab05c5 100644
--- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
@@ -1085,6 +1085,9 @@ private XContentBuilder getInvalidMapping(
                 b.startObject();
                 b.field("name", "keyword1");
                 b.endObject();
+                b.startObject();
+                b.field("name", "ip1");
+                b.endObject();
             }
             b.endArray();
             b.startArray("metrics");
@@ -1117,7 +1120,7 @@ private XContentBuilder getInvalidMapping(
             if (!invalidDimType) {
                 b.field("type", "integer");
             } else {
-                b.field("type", "ip");
+                b.field("type", "wildcard");
             }
             b.endObject();
             b.startObject("metric_field");
@@ -1130,6 +1133,9 @@ private XContentBuilder getInvalidMapping(
             b.startObject("keyword1");
             b.field("type", "keyword");
             b.endObject();
+            b.startObject("ip1");
+            b.field("type", "ip");
+            b.endObject();
             b.endObject();
         });
     }

From f4fd707469d49f9de3f76cbde60567e36ef5f7bd Mon Sep 17 00:00:00 2001
From: Andrew Ross <andrross@amazon.com>
Date: Thu, 19 Dec 2024 15:35:13 -0600
Subject: [PATCH 04/61] Remove the events-correlation-engine plugin (#16885)

We [reached agreement][1] to move this plugin to a separate repository. The
implementation was never completed here, and this code was never backported to
any release, so it is safe to remove.

[1]: https://github.com/opensearch-project/OpenSearch/pull/7771#issuecomment-2193974273

Signed-off-by: Andrew Ross <andrross@amazon.com>
---
 gradle/missing-javadoc.gradle                 |   1 -
 .../events-correlation-engine/build.gradle    |  21 -
 .../EventsCorrelationPluginTransportIT.java   | 177 --------
 .../CorrelationVectorsEngineIT.java           | 312 --------------
 .../EventsCorrelationPluginRestIT.java        | 154 -------
 .../correlation/EventsCorrelationPlugin.java  | 142 -------
 .../core/index/CorrelationParamsContext.java  | 148 -------
 .../correlation/core/index/VectorField.java   |  51 ---
 .../BasePerFieldCorrelationVectorsFormat.java | 104 -----
 .../index/codec/CorrelationCodecService.java  |  38 --
 .../index/codec/CorrelationCodecVersion.java  | 103 -----
 .../correlation990/CorrelationCodec.java      |  46 --
 .../PerFieldCorrelationVectorsFormat.java     |  35 --
 .../codec/correlation990/package-info.java    |  12 -
 .../core/index/codec/package-info.java        |  12 -
 .../mapper/CorrelationVectorFieldMapper.java  | 173 --------
 .../core/index/mapper/VectorFieldMapper.java  | 399 ------------------
 .../core/index/mapper/package-info.java       |  12 -
 .../correlation/core/index/package-info.java  |  12 -
 .../index/query/CorrelationQueryBuilder.java  | 332 ---------------
 .../index/query/CorrelationQueryFactory.java  | 142 -------
 .../core/index/query/package-info.java        |  12 -
 .../plugin/correlation/package-info.java      |  12 -
 .../action/IndexCorrelationRuleAction.java    |  32 --
 .../action/IndexCorrelationRuleRequest.java   | 101 -----
 .../action/IndexCorrelationRuleResponse.java  |  94 -----
 .../rules/action/package-info.java            |  12 -
 .../rules/model/CorrelationQuery.java         | 197 ---------
 .../rules/model/CorrelationRule.java          | 244 -----------
 .../correlation/rules/model/package-info.java |  12 -
 .../RestIndexCorrelationRuleAction.java       | 111 -----
 .../rules/resthandler/package-info.java       |  12 -
 .../TransportIndexCorrelationRuleAction.java  | 234 ----------
 .../rules/transport/package-info.java         |  12 -
 .../settings/EventsCorrelationSettings.java   |  47 ---
 .../correlation/settings/package-info.java    |  12 -
 .../utils/CorrelationRuleIndices.java         |  83 ----
 .../plugin/correlation/utils/IndexUtils.java  | 139 ------
 .../correlation/utils/package-info.java       |  12 -
 .../services/org.apache.lucene.codecs.Codec   |   1 -
 .../resources/mappings/correlation-rules.json |  60 ---
 .../EventsCorrelationPluginTests.java         |  19 -
 .../index/CorrelationParamsContextTests.java  | 170 --------
 .../core/index/VectorFieldTests.java          |  83 ----
 .../correlation990/CorrelationCodecTests.java | 121 ------
 .../CorrelationVectorFieldMapperTests.java    | 310 --------------
 .../query/CorrelationQueryBuilderTests.java   | 269 ------------
 .../EventsCorrelationSettingsTests.java       |  58 ---
 48 files changed, 4895 deletions(-)
 delete mode 100644 plugins/events-correlation-engine/build.gradle
 delete mode 100644 plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java
 delete mode 100644 plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java
 delete mode 100644 plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java
 delete mode 100644 plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java
 delete mode 100644 plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
 delete mode 100644 plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java
 delete mode 100644 plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java

diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle
index 751da941d25dd..5a98a60e806ea 100644
--- a/gradle/missing-javadoc.gradle
+++ b/gradle/missing-javadoc.gradle
@@ -170,7 +170,6 @@ configure([
   project(":libs:opensearch-common"),
   project(":libs:opensearch-core"),
   project(":libs:opensearch-compress"),
-  project(":plugins:events-correlation-engine"),
   project(":server")
 ]) {
   project.tasks.withType(MissingJavadocTask) {
diff --git a/plugins/events-correlation-engine/build.gradle b/plugins/events-correlation-engine/build.gradle
deleted file mode 100644
index c3eff30012b1d..0000000000000
--- a/plugins/events-correlation-engine/build.gradle
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- *
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-apply plugin: 'opensearch.java-rest-test'
-apply plugin: 'opensearch.internal-cluster-test'
-
-opensearchplugin {
-  description 'OpenSearch Events Correlation Engine.'
-  classname 'org.opensearch.plugin.correlation.EventsCorrelationPlugin'
-}
-
-dependencies {
-}
diff --git a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java b/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java
deleted file mode 100644
index 028848a91213e..0000000000000
--- a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation;
-
-import org.apache.lucene.search.join.ScoreMode;
-import org.opensearch.action.admin.cluster.node.info.NodeInfo;
-import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
-import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
-import org.opensearch.action.admin.cluster.node.info.PluginsAndModules;
-import org.opensearch.action.search.SearchRequest;
-import org.opensearch.action.search.SearchResponse;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.index.query.NestedQueryBuilder;
-import org.opensearch.index.query.QueryBuilders;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse;
-import org.opensearch.plugin.correlation.rules.model.CorrelationQuery;
-import org.opensearch.plugin.correlation.rules.model.CorrelationRule;
-import org.opensearch.plugins.Plugin;
-import org.opensearch.plugins.PluginInfo;
-import org.opensearch.rest.RestRequest;
-import org.opensearch.search.builder.SearchSourceBuilder;
-import org.opensearch.test.OpenSearchIntegTestCase;
-import org.junit.Assert;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-/**
- * Transport Action tests for events-correlation-plugin
- */
-public class EventsCorrelationPluginTransportIT extends OpenSearchIntegTestCase {
-
-    @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(EventsCorrelationPlugin.class);
-    }
-
-    /**
-     * test events-correlation-plugin is installed
-     */
-    public void testPluginsAreInstalled() {
-        NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
-        nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName());
-        NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
-        List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes()
-            .stream()
-            .flatMap(
-                (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream()
-            )
-            .collect(Collectors.toList());
-        Assert.assertTrue(
-            pluginInfos.stream()
-                .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.correlation.EventsCorrelationPlugin"))
-        );
-    }
-
-    /**
-     * test creating a correlation rule
-     * @throws Exception Exception
-     */
-    public void testCreatingACorrelationRule() throws Exception {
-        List<CorrelationQuery> correlationQueries = Arrays.asList(
-            new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", "@timestamp", List.of("s3")),
-            new CorrelationQuery("app_logs", "keywords:PermissionDenied", "@timestamp", List.of("others_application"))
-        );
-        CorrelationRule correlationRule = new CorrelationRule("s3 to app logs", correlationQueries);
-        IndexCorrelationRuleRequest request = new IndexCorrelationRuleRequest(correlationRule, RestRequest.Method.POST);
-
-        IndexCorrelationRuleResponse response = client().execute(IndexCorrelationRuleAction.INSTANCE, request).get();
-        Assert.assertEquals(RestStatus.CREATED, response.getStatus());
-
-        NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery(
-            "correlate",
-            QueryBuilders.matchQuery("correlate.index", "s3_access_logs"),
-            ScoreMode.None
-        );
-        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
-        searchSourceBuilder.query(queryBuilder);
-        searchSourceBuilder.fetchSource(true);
-
-        SearchRequest searchRequest = new SearchRequest();
-        searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX);
-        searchRequest.source(searchSourceBuilder);
-
-        SearchResponse searchResponse = client().search(searchRequest).get();
-        Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value);
-    }
-
-    /**
-     * test filtering correlation rules
-     * @throws Exception Exception
-     */
-    public void testFilteringCorrelationRules() throws Exception {
-        List<CorrelationQuery> correlationQueries1 = Arrays.asList(
-            new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", "@timestamp", List.of("s3")),
-            new CorrelationQuery("app_logs", "keywords:PermissionDenied", "@timestamp", List.of("others_application"))
-        );
-        CorrelationRule correlationRule1 = new CorrelationRule("s3 to app logs", correlationQueries1);
-        IndexCorrelationRuleRequest request1 = new IndexCorrelationRuleRequest(correlationRule1, RestRequest.Method.POST);
-        client().execute(IndexCorrelationRuleAction.INSTANCE, request1).get();
-
-        List<CorrelationQuery> correlationQueries2 = Arrays.asList(
-            new CorrelationQuery("windows", "host.hostname:EC2AMAZ*", "@timestamp", List.of("windows")),
-            new CorrelationQuery("app_logs", "endpoint:/customer_records.txt", "@timestamp", List.of("others_application"))
-        );
-        CorrelationRule correlationRule2 = new CorrelationRule("windows to app logs", correlationQueries2);
-        IndexCorrelationRuleRequest request2 = new IndexCorrelationRuleRequest(correlationRule2, RestRequest.Method.POST);
-        client().execute(IndexCorrelationRuleAction.INSTANCE, request2).get();
-
-        NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery(
-            "correlate",
-            QueryBuilders.matchQuery("correlate.index", "s3_access_logs"),
-            ScoreMode.None
-        );
-        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
-        searchSourceBuilder.query(queryBuilder);
-        searchSourceBuilder.fetchSource(true);
-
-        SearchRequest searchRequest = new SearchRequest();
-        searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX);
-        searchRequest.source(searchSourceBuilder);
-
-        SearchResponse searchResponse = client().search(searchRequest).get();
-        Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value);
-    }
-
-    /**
-     * test creating a correlation rule with no timestamp field
-     * @throws Exception Exception
-     */
-    @SuppressWarnings("unchecked")
-    public void testCreatingACorrelationRuleWithNoTimestampField() throws Exception {
-        List<CorrelationQuery> correlationQueries = Arrays.asList(
-            new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", null, List.of("s3")),
-            new CorrelationQuery("app_logs", "keywords:PermissionDenied", null, List.of("others_application"))
-        );
-        CorrelationRule correlationRule = new CorrelationRule("s3 to app logs", correlationQueries);
-        IndexCorrelationRuleRequest request = new IndexCorrelationRuleRequest(correlationRule, RestRequest.Method.POST);
-
-        IndexCorrelationRuleResponse response = client().execute(IndexCorrelationRuleAction.INSTANCE, request).get();
-        Assert.assertEquals(RestStatus.CREATED, response.getStatus());
-
-        NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery(
-            "correlate",
-            QueryBuilders.matchQuery("correlate.index", "s3_access_logs"),
-            ScoreMode.None
-        );
-        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
-        searchSourceBuilder.query(queryBuilder);
-        searchSourceBuilder.fetchSource(true);
-
-        SearchRequest searchRequest = new SearchRequest();
-        searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX);
-        searchRequest.source(searchSourceBuilder);
-
-        SearchResponse searchResponse = client().search(searchRequest).get();
-        Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value);
-        Assert.assertEquals(
-            "_timestamp",
-            ((List<Map<String, Object>>) (searchResponse.getHits().getHits()[0].getSourceAsMap().get("correlate"))).get(0)
-                .get("timestampField")
-        );
-    }
-}
diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java
deleted file mode 100644
index 414fe1948f053..0000000000000
--- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation;
-
-import org.apache.hc.core5.http.Header;
-import org.apache.hc.core5.http.HttpEntity;
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.opensearch.client.Request;
-import org.opensearch.client.RequestOptions;
-import org.opensearch.client.Response;
-import org.opensearch.client.ResponseException;
-import org.opensearch.client.RestClient;
-import org.opensearch.client.WarningsHandler;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.core.common.Strings;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.core.xcontent.MediaTypeRegistry;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.index.IndexSettings;
-import org.opensearch.test.rest.OpenSearchRestTestCase;
-import org.junit.Assert;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-/**
- * Correlation Vectors Engine e2e tests
- */
-public class CorrelationVectorsEngineIT extends OpenSearchRestTestCase {
-
-    private static final int DIMENSION = 4;
-    private static final String PROPERTIES_FIELD_NAME = "properties";
-    private static final String TYPE_FIELD_NAME = "type";
-    private static final String CORRELATION_VECTOR_TYPE = "correlation_vector";
-    private static final String DIMENSION_FIELD_NAME = "dimension";
-    private static final int M = 16;
-    private static final int EF_CONSTRUCTION = 128;
-    private static final String INDEX_NAME = "test-index-1";
-    private static final Float[][] TEST_VECTORS = new Float[][] {
-        { 1.0f, 1.0f, 1.0f, 1.0f },
-        { 2.0f, 2.0f, 2.0f, 2.0f },
-        { 3.0f, 3.0f, 3.0f, 3.0f } };
-    private static final float[][] TEST_QUERY_VECTORS = new float[][] {
-        { 1.0f, 1.0f, 1.0f, 1.0f },
-        { 2.0f, 2.0f, 2.0f, 2.0f },
-        { 3.0f, 3.0f, 3.0f, 3.0f } };
-    private static final Map<VectorSimilarityFunction, Function<Float, Float>> VECTOR_SIMILARITY_TO_SCORE = Map.of(
-        VectorSimilarityFunction.EUCLIDEAN,
-        (similarity) -> 1 / (1 + similarity),
-        VectorSimilarityFunction.DOT_PRODUCT,
-        (similarity) -> (1 + similarity) / 2,
-        VectorSimilarityFunction.COSINE,
-        (similarity) -> (1 + similarity) / 2
-    );
-
-    /**
-     * test the e2e storage and query layer of events-correlation-engine
-     * @throws IOException IOException
-     */
-    @SuppressWarnings("unchecked")
-    public void testQuery() throws IOException {
-        String textField = "text-field";
-        String luceneField = "lucene-field";
-        XContentBuilder builder = XContentFactory.jsonBuilder()
-            .startObject()
-            .startObject(PROPERTIES_FIELD_NAME)
-            .startObject(textField)
-            .field(TYPE_FIELD_NAME, "text")
-            .endObject()
-            .startObject(luceneField)
-            .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE)
-            .field(DIMENSION_FIELD_NAME, DIMENSION)
-            .startObject("correlation_ctx")
-            .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name())
-            .startObject("parameters")
-            .field("m", M)
-            .field("ef_construction", EF_CONSTRUCTION)
-            .endObject()
-            .endObject()
-            .endObject()
-            .endObject()
-            .endObject();
-
-        String mapping = builder.toString();
-        createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings());
-
-        for (int idx = 0; idx < TEST_VECTORS.length; ++idx) {
-            addCorrelationDoc(
-                INDEX_NAME,
-                String.valueOf(idx + 1),
-                List.of(textField, luceneField),
-                List.of(java.util.UUID.randomUUID().toString(), TEST_VECTORS[idx])
-            );
-        }
-        refreshAllIndices();
-        Assert.assertEquals(TEST_VECTORS.length, getDocCount(INDEX_NAME));
-
-        int k = 2;
-        for (float[] query : TEST_QUERY_VECTORS) {
-
-            String correlationQuery = "{\n"
-                + "  \"query\": {\n"
-                + "    \"correlation\": {\n"
-                + "      \"lucene-field\": {\n"
-                + "        \"vector\": \n"
-                + Arrays.toString(query)
-                + "        ,\n"
-                + "        \"k\": 2,\n"
-                + "        \"boost\": 1\n"
-                + "      }\n"
-                + "    }\n"
-                + "  }\n"
-                + "}";
-
-            Response response = searchCorrelationIndex(INDEX_NAME, correlationQuery, k);
-            Map<String, Object> responseBody = entityAsMap(response);
-            Assert.assertEquals(2, ((List<Object>) ((Map<String, Object>) responseBody.get("hits")).get("hits")).size());
-            @SuppressWarnings("unchecked")
-            double actualScore1 = Double.parseDouble(
-                ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits")).get("hits")).get(0).get("_score").toString()
-            );
-            @SuppressWarnings("unchecked")
-            double actualScore2 = Double.parseDouble(
-                ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits")).get("hits")).get(1).get("_score").toString()
-            );
-            @SuppressWarnings("unchecked")
-            List<Float> hit1 = ((Map<String, List<Double>>) ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits"))
-                .get("hits")).get(0).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList());
-            float[] resultVector1 = new float[hit1.size()];
-            for (int i = 0; i < hit1.size(); ++i) {
-                resultVector1[i] = hit1.get(i);
-            }
-
-            @SuppressWarnings("unchecked")
-            List<Float> hit2 = ((Map<String, List<Double>>) ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits"))
-                .get("hits")).get(1).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList());
-            float[] resultVector2 = new float[hit2.size()];
-            for (int i = 0; i < hit2.size(); ++i) {
-                resultVector2[i] = hit2.get(i);
-            }
-
-            double rawScore1 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector1, query);
-            Assert.assertEquals(rawScore1, actualScore1, 0.0001);
-            double rawScore2 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector2, query);
-            Assert.assertEquals(rawScore2, actualScore2, 0.0001);
-        }
-    }
-
-    /**
-     * unhappy test for the e2e storage and query layer of events-correlation-engine with no index exist
-     */
-    public void testQueryWithNoIndexExist() {
-        float[] query = new float[] { 1.0f, 1.0f, 1.0f, 1.0f };
-        String correlationQuery = "{\n"
-            + "  \"query\": {\n"
-            + "    \"correlation\": {\n"
-            + "      \"lucene-field\": {\n"
-            + "        \"vector\": \n"
-            + Arrays.toString(query)
-            + "        ,\n"
-            + "        \"k\": 2,\n"
-            + "        \"boost\": 1\n"
-            + "      }\n"
-            + "    }\n"
-            + "  }\n"
-            + "}";
-        Exception ex = assertThrows(ResponseException.class, () -> { searchCorrelationIndex(INDEX_NAME, correlationQuery, 2); });
-        String expectedMessage = String.format(Locale.ROOT, "no such index [%s]", INDEX_NAME);
-        String actualMessage = ex.getMessage();
-        Assert.assertTrue(actualMessage.contains(expectedMessage));
-    }
-
-    /**
-     * unhappy test for the e2e storage and query layer of events-correlation-engine with wrong mapping
-     */
-    public void testQueryWithWrongMapping() throws IOException {
-        String textField = "text-field";
-        String luceneField = "lucene-field";
-        XContentBuilder builder = XContentFactory.jsonBuilder()
-            .startObject()
-            .startObject(PROPERTIES_FIELD_NAME)
-            .startObject(textField)
-            .field(TYPE_FIELD_NAME, "text")
-            .endObject()
-            .startObject(luceneField)
-            .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE)
-            .field("test", DIMENSION)
-            .startObject("correlation_ctx")
-            .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name())
-            .startObject("parameters")
-            .field("m", M)
-            .field("ef_construction", EF_CONSTRUCTION)
-            .endObject()
-            .endObject()
-            .endObject()
-            .endObject()
-            .endObject();
-
-        String mapping = builder.toString();
-        Exception ex = assertThrows(ResponseException.class, () -> {
-            createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings());
-        });
-
-        String expectedMessage = String.format(
-            Locale.ROOT,
-            "unknown parameter [test] on mapper [%s] of type [correlation_vector]",
-            luceneField
-        );
-        String actualMessage = ex.getMessage();
-        Assert.assertTrue(actualMessage.contains(expectedMessage));
-    }
-
-    private String createTestIndexWithMappingJson(RestClient client, String index, String mapping, Settings settings) throws IOException {
-        Request request = new Request("PUT", "/" + index);
-        String entity = "{\"settings\": " + Strings.toString(MediaTypeRegistry.JSON, settings);
-        if (mapping != null) {
-            entity = entity + ",\"mappings\" : " + mapping;
-        }
-
-        entity = entity + "}";
-        if (!settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) {
-            expectSoftDeletesWarning(request, index);
-        }
-
-        request.setJsonEntity(entity);
-        client.performRequest(request);
-        return index;
-    }
-
-    private Settings getCorrelationDefaultIndexSettings() {
-        return Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).put("index.correlation", true).build();
-    }
-
-    private void addCorrelationDoc(String index, String docId, List<String> fieldNames, List<Object> vectors) throws IOException {
-        Request request = new Request("POST", "/" + index + "/_doc/" + docId + "?refresh=true");
-
-        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
-        for (int i = 0; i < fieldNames.size(); i++) {
-            builder.field(fieldNames.get(i), vectors.get(i));
-        }
-        builder.endObject();
-
-        request.setJsonEntity(builder.toString());
-        Response response = client().performRequest(request);
-        assertEquals(request.getEndpoint() + ": failed", RestStatus.CREATED, RestStatus.fromCode(response.getStatusLine().getStatusCode()));
-    }
-
-    private Response searchCorrelationIndex(String index, String correlationQuery, int resultSize) throws IOException {
-        Request request = new Request("POST", "/" + index + "/_search");
-
-        request.addParameter("size", Integer.toString(resultSize));
-        request.addParameter("explain", Boolean.toString(true));
-        request.addParameter("search_type", "query_then_fetch");
-        request.setJsonEntity(correlationQuery);
-
-        Response response = client().performRequest(request);
-        Assert.assertEquals("Search failed", RestStatus.OK, restStatus(response));
-        return response;
-    }
-
-    private int getDocCount(String index) throws IOException {
-        Response response = makeRequest(
-            client(),
-            "GET",
-            String.format(Locale.getDefault(), "/%s/_count", index),
-            Collections.emptyMap(),
-            null
-        );
-        Assert.assertEquals(RestStatus.OK, restStatus(response));
-        return Integer.parseInt(entityAsMap(response).get("count").toString());
-    }
-
-    private Response makeRequest(
-        RestClient client,
-        String method,
-        String endpoint,
-        Map<String, String> params,
-        HttpEntity entity,
-        Header... headers
-    ) throws IOException {
-        Request request = new Request(method, endpoint);
-        RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
-        options.setWarningsHandler(WarningsHandler.PERMISSIVE);
-
-        for (Header header : headers) {
-            options.addHeader(header.getName(), header.getValue());
-        }
-        request.setOptions(options.build());
-        request.addParameters(params);
-        if (entity != null) {
-            request.setEntity(entity);
-        }
-        return client.performRequest(request);
-    }
-
-    private RestStatus restStatus(Response response) {
-        return RestStatus.fromCode(response.getStatusLine().getStatusCode());
-    }
-}
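
Note on the test removed above: it exercised the plugin's "correlation" query through the low-level REST client. A minimal, self-contained sketch of the same request is shown below; it assumes a cluster on localhost:9200 that still has the (now removed) plugin installed and an index with a correlation_vector field named "lucene-field". The class name and host are illustrative, not part of the removed code.

    import org.apache.hc.core5.http.HttpHost;
    import org.opensearch.client.Request;
    import org.opensearch.client.Response;
    import org.opensearch.client.RestClient;

    public class CorrelationQuerySketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical host/index; requires a cluster that still runs the plugin.
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
                Request request = new Request("POST", "/test-index-1/_search");
                request.addParameter("size", "2");
                // Same query shape the deleted test built by hand: a "correlation" query
                // against the vector field with k nearest neighbours and a boost.
                request.setJsonEntity(
                    "{ \"query\": { \"correlation\": { \"lucene-field\": {"
                        + " \"vector\": [1.0, 1.0, 1.0, 1.0], \"k\": 2, \"boost\": 1 } } } }"
                );
                Response response = client.performRequest(request);
                System.out.println(response.getStatusLine().getStatusCode());
            }
        }
    }
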
diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java
deleted file mode 100644
index 3791a5cdf5db0..0000000000000
--- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation;
-
-import org.opensearch.action.search.SearchResponse;
-import org.opensearch.client.Request;
-import org.opensearch.client.Response;
-import org.opensearch.common.xcontent.LoggingDeprecationHandler;
-import org.opensearch.common.xcontent.json.JsonXContent;
-import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.test.rest.OpenSearchRestTestCase;
-import org.junit.Assert;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Rest Action tests for events-correlation-plugin
- */
-public class EventsCorrelationPluginRestIT extends OpenSearchRestTestCase {
-
-    /**
-     * test events-correlation-plugin is installed
-     * @throws IOException IOException
-     */
-    @SuppressWarnings("unchecked")
-    public void testPluginsAreInstalled() throws IOException {
-        Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json");
-        Response response = client().performRequest(request);
-        List<Object> pluginsList = JsonXContent.jsonXContent.createParser(
-            NamedXContentRegistry.EMPTY,
-            LoggingDeprecationHandler.INSTANCE,
-            response.getEntity().getContent()
-        ).list();
-        Assert.assertTrue(
-            pluginsList.stream()
-                .map(o -> (Map<String, Object>) o)
-                .anyMatch(plugin -> plugin.get("component").equals("events-correlation-engine"))
-        );
-    }
-
-    /**
-     * test creating a correlation rule
-     * @throws IOException IOException
-     */
-    public void testCreatingACorrelationRule() throws IOException {
-        Request request = new Request("POST", "/_correlation/rules");
-        request.setJsonEntity(sampleCorrelationRule());
-        Response response = client().performRequest(request);
-
-        Assert.assertEquals(201, response.getStatusLine().getStatusCode());
-
-        Map<String, Object> responseMap = entityAsMap(response);
-        String id = responseMap.get("_id").toString();
-
-        request = new Request("POST", "/.opensearch-correlation-rules-config/_search");
-        request.setJsonEntity(matchIdQuery(id));
-        response = client().performRequest(request);
-
-        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
-        SearchResponse searchResponse = SearchResponse.fromXContent(
-            createParser(JsonXContent.jsonXContent, response.getEntity().getContent())
-        );
-        Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value);
-    }
-
-    /**
-     * test creating a correlation rule with no timestamp field
-     * @throws IOException IOException
-     */
-    @SuppressWarnings("unchecked")
-    public void testCreatingACorrelationRuleWithNoTimestampField() throws IOException {
-        Request request = new Request("POST", "/_correlation/rules");
-        request.setJsonEntity(sampleCorrelationRuleWithNoTimestamp());
-        Response response = client().performRequest(request);
-
-        Assert.assertEquals(201, response.getStatusLine().getStatusCode());
-
-        Map<String, Object> responseMap = entityAsMap(response);
-        String id = responseMap.get("_id").toString();
-
-        request = new Request("POST", "/.opensearch-correlation-rules-config/_search");
-        request.setJsonEntity(matchIdQuery(id));
-        response = client().performRequest(request);
-
-        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
-        SearchResponse searchResponse = SearchResponse.fromXContent(
-            createParser(JsonXContent.jsonXContent, response.getEntity().getContent())
-        );
-        Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value);
-        Assert.assertEquals(
-            "_timestamp",
-            ((List<Map<String, Object>>) (searchResponse.getHits().getHits()[0].getSourceAsMap().get("correlate"))).get(0)
-                .get("timestampField")
-        );
-    }
-
-    private String sampleCorrelationRule() {
-        return "{\n"
-            + "  \"name\": \"s3 to app logs\",\n"
-            + "  \"correlate\": [\n"
-            + "    {\n"
-            + "      \"index\": \"s3_access_logs\",\n"
-            + "      \"query\": \"aws.cloudtrail.eventName:ReplicateObject\",\n"
-            + "      \"timestampField\": \"@timestamp\",\n"
-            + "      \"tags\": [\n"
-            + "        \"s3\"\n"
-            + "      ]\n"
-            + "    },\n"
-            + "    {\n"
-            + "      \"index\": \"app_logs\",\n"
-            + "      \"query\": \"keywords:PermissionDenied\",\n"
-            + "      \"timestampField\": \"@timestamp\",\n"
-            + "      \"tags\": [\n"
-            + "        \"others_application\"\n"
-            + "      ]\n"
-            + "    }\n"
-            + "  ]\n"
-            + "}";
-    }
-
-    private String sampleCorrelationRuleWithNoTimestamp() {
-        return "{\n"
-            + "  \"name\": \"s3 to app logs\",\n"
-            + "  \"correlate\": [\n"
-            + "    {\n"
-            + "      \"index\": \"s3_access_logs\",\n"
-            + "      \"query\": \"aws.cloudtrail.eventName:ReplicateObject\",\n"
-            + "      \"tags\": [\n"
-            + "        \"s3\"\n"
-            + "      ]\n"
-            + "    },\n"
-            + "    {\n"
-            + "      \"index\": \"app_logs\",\n"
-            + "      \"query\": \"keywords:PermissionDenied\",\n"
-            + "      \"tags\": [\n"
-            + "        \"others_application\"\n"
-            + "      ]\n"
-            + "    }\n"
-            + "  ]\n"
-            + "}";
-    }
-
-    private String matchIdQuery(String id) {
-        return "{\n" + "   \"query\" : {\n" + "     \"match\":{\n" + "        \"_id\": \"" + id + "\"\n" + "     }\n" + "   }\n" + "}";
-    }
-}
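
Note on the test removed above: it drove the plugin's POST /_correlation/rules endpoint, which this change also deletes. A hedged sketch of that call with the low-level REST client follows; it only succeeds against a cluster still running the events-correlation-engine plugin, and the class name and host are assumptions for illustration.

    import org.apache.hc.core5.http.HttpHost;
    import org.opensearch.client.Request;
    import org.opensearch.client.Response;
    import org.opensearch.client.RestClient;

    public class CreateCorrelationRuleSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
                Request request = new Request("POST", "/_correlation/rules");
                // Same payload shape the deleted test posted: two correlated index/query pairs.
                request.setJsonEntity(
                    "{ \"name\": \"s3 to app logs\", \"correlate\": ["
                        + "{ \"index\": \"s3_access_logs\", \"query\": \"aws.cloudtrail.eventName:ReplicateObject\","
                        + " \"timestampField\": \"@timestamp\", \"tags\": [\"s3\"] },"
                        + "{ \"index\": \"app_logs\", \"query\": \"keywords:PermissionDenied\","
                        + " \"timestampField\": \"@timestamp\", \"tags\": [\"others_application\"] } ] }"
                );
                Response response = client.performRequest(request);
                System.out.println(response.getStatusLine().getStatusCode()); // 201 while the plugin was installed
            }
        }
    }
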
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java
deleted file mode 100644
index 9637042974d03..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation;
-
-import org.opensearch.action.ActionRequest;
-import org.opensearch.client.Client;
-import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
-import org.opensearch.cluster.node.DiscoveryNodes;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.settings.ClusterSettings;
-import org.opensearch.common.settings.IndexScopedSettings;
-import org.opensearch.common.settings.Setting;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.settings.SettingsFilter;
-import org.opensearch.core.action.ActionResponse;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.env.Environment;
-import org.opensearch.env.NodeEnvironment;
-import org.opensearch.index.IndexSettings;
-import org.opensearch.index.codec.CodecServiceFactory;
-import org.opensearch.index.mapper.Mapper;
-import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecService;
-import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper;
-import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper;
-import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryBuilder;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction;
-import org.opensearch.plugin.correlation.rules.resthandler.RestIndexCorrelationRuleAction;
-import org.opensearch.plugin.correlation.rules.transport.TransportIndexCorrelationRuleAction;
-import org.opensearch.plugin.correlation.settings.EventsCorrelationSettings;
-import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices;
-import org.opensearch.plugins.ActionPlugin;
-import org.opensearch.plugins.EnginePlugin;
-import org.opensearch.plugins.MapperPlugin;
-import org.opensearch.plugins.Plugin;
-import org.opensearch.plugins.SearchPlugin;
-import org.opensearch.repositories.RepositoriesService;
-import org.opensearch.rest.RestController;
-import org.opensearch.rest.RestHandler;
-import org.opensearch.script.ScriptService;
-import org.opensearch.threadpool.ThreadPool;
-import org.opensearch.watcher.ResourceWatcherService;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.function.Supplier;
-
-/**
- * Plugin class for events-correlation-engine
- */
-public class EventsCorrelationPlugin extends Plugin implements ActionPlugin, MapperPlugin, SearchPlugin, EnginePlugin {
-
-    /**
-     * events-correlation-engine base uri
-     */
-    public static final String PLUGINS_BASE_URI = "/_correlation";
-    /**
-     * events-correlation-engine rules uri
-     */
-    public static final String CORRELATION_RULES_BASE_URI = PLUGINS_BASE_URI + "/rules";
-
-    private CorrelationRuleIndices correlationRuleIndices;
-
-    /**
-     * Default constructor
-     */
-    public EventsCorrelationPlugin() {}
-
-    @Override
-    public Collection<Object> createComponents(
-        Client client,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ResourceWatcherService resourceWatcherService,
-        ScriptService scriptService,
-        NamedXContentRegistry xContentRegistry,
-        Environment environment,
-        NodeEnvironment nodeEnvironment,
-        NamedWriteableRegistry namedWriteableRegistry,
-        IndexNameExpressionResolver indexNameExpressionResolver,
-        Supplier<RepositoriesService> repositoriesServiceSupplier
-    ) {
-        correlationRuleIndices = new CorrelationRuleIndices(client, clusterService);
-        return List.of(correlationRuleIndices);
-    }
-
-    @Override
-    public List<RestHandler> getRestHandlers(
-        Settings settings,
-        RestController restController,
-        ClusterSettings clusterSettings,
-        IndexScopedSettings indexScopedSettings,
-        SettingsFilter settingsFilter,
-        IndexNameExpressionResolver indexNameExpressionResolver,
-        Supplier<DiscoveryNodes> nodesInCluster
-    ) {
-        return List.of(new RestIndexCorrelationRuleAction());
-    }
-
-    @Override
-    public Map<String, Mapper.TypeParser> getMappers() {
-        return Collections.singletonMap(CorrelationVectorFieldMapper.CONTENT_TYPE, new VectorFieldMapper.TypeParser());
-    }
-
-    @Override
-    public Optional<CodecServiceFactory> getCustomCodecServiceFactory(IndexSettings indexSettings) {
-        if (indexSettings.getValue(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING)) {
-            return Optional.of(CorrelationCodecService::new);
-        }
-        return Optional.empty();
-    }
-
-    @Override
-    public List<QuerySpec<?>> getQueries() {
-        return Collections.singletonList(
-            new QuerySpec<>(
-                CorrelationQueryBuilder.NAME_FIELD.getPreferredName(),
-                CorrelationQueryBuilder::new,
-                CorrelationQueryBuilder::parse
-            )
-        );
-    }
-
-    @Override
-    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
-        return List.of(new ActionPlugin.ActionHandler<>(IndexCorrelationRuleAction.INSTANCE, TransportIndexCorrelationRuleAction.class));
-    }
-
-    @Override
-    public List<Setting<?>> getSettings() {
-        return List.of(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING, EventsCorrelationSettings.CORRELATION_TIME_WINDOW);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java
deleted file mode 100644
index fef9200a73091..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index;
-
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.core.xcontent.ToXContentFragment;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.index.mapper.MapperParsingException;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-
-/**
- * Defines vector similarity function, m and ef_construction hyper parameters field mappings for correlation_vector type.
- *
- * @opensearch.internal
- */
-public class CorrelationParamsContext implements ToXContentFragment, Writeable {
-
-    /**
-     * Vector Similarity Function field
-     */
-    public static final String VECTOR_SIMILARITY_FUNCTION = "similarityFunction";
-    /**
-     * Parameters field to define m and ef_construction
-     */
-    public static final String PARAMETERS = "parameters";
-
-    private final VectorSimilarityFunction similarityFunction;
-    private final Map<String, Object> parameters;
-
-    /**
-     * Parameterized ctor for CorrelationParamsContext
-     * @param similarityFunction Vector Similarity Function
-     * @param parameters Parameters to define m and ef_construction
-     */
-    public CorrelationParamsContext(VectorSimilarityFunction similarityFunction, Map<String, Object> parameters) {
-        this.similarityFunction = similarityFunction;
-        this.parameters = parameters;
-    }
-
-    /**
-     * Parameterized ctor for CorrelationParamsContext
-     * @param sin StreamInput
-     * @throws IOException IOException
-     */
-    public CorrelationParamsContext(StreamInput sin) throws IOException {
-        this.similarityFunction = VectorSimilarityFunction.valueOf(sin.readString());
-        if (sin.available() > 0) {
-            this.parameters = sin.readMap();
-        } else {
-            this.parameters = null;
-        }
-    }
-
-    /**
-     * Parse into CorrelationParamsContext
-     * @param in Object
-     * @return CorrelationParamsContext
-     */
-    public static CorrelationParamsContext parse(Object in) {
-        if (!(in instanceof Map<?, ?>)) {
-            throw new MapperParsingException("Unable to parse CorrelationParamsContext");
-        }
-
-        @SuppressWarnings("unchecked")
-        Map<String, Object> contextMap = (Map<String, Object>) in;
-        VectorSimilarityFunction similarityFunction = VectorSimilarityFunction.EUCLIDEAN;
-        Map<String, Object> parameters = new HashMap<>();
-
-        if (contextMap.containsKey(VECTOR_SIMILARITY_FUNCTION)) {
-            Object value = contextMap.get(VECTOR_SIMILARITY_FUNCTION);
-
-            if (value != null && !(value instanceof String)) {
-                throw new MapperParsingException(String.format(Locale.getDefault(), "%s must be a string", VECTOR_SIMILARITY_FUNCTION));
-            }
-
-            try {
-                similarityFunction = VectorSimilarityFunction.valueOf((String) value);
-            } catch (IllegalArgumentException ex) {
-                throw new MapperParsingException(String.format(Locale.getDefault(), "Invalid %s: %s", VECTOR_SIMILARITY_FUNCTION, value));
-            }
-        }
-        if (contextMap.containsKey(PARAMETERS)) {
-            Object value = contextMap.get(PARAMETERS);
-            if (!(value instanceof Map)) {
-                throw new MapperParsingException("Unable to parse parameters for Correlation context");
-            }
-
-            @SuppressWarnings("unchecked")
-            Map<String, Object> valueMap = (Map<String, Object>) value;
-            parameters.putAll(valueMap);
-        }
-        return new CorrelationParamsContext(similarityFunction, parameters);
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject();
-        builder.field(VECTOR_SIMILARITY_FUNCTION, similarityFunction.name());
-        if (params == null) {
-            builder.field(PARAMETERS, (String) null);
-        } else {
-            builder.startObject(PARAMETERS);
-            for (Map.Entry<String, Object> parameter : parameters.entrySet()) {
-                builder.field(parameter.getKey(), parameter.getValue());
-            }
-            builder.endObject();
-        }
-        builder.endObject();
-        return builder;
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeString(similarityFunction.name());
-        if (this.parameters != null) {
-            out.writeMap(parameters);
-        }
-    }
-
-    /**
-     * get Vector Similarity Function
-     * @return Vector Similarity Function
-     */
-    public VectorSimilarityFunction getSimilarityFunction() {
-        return similarityFunction;
-    }
-
-    /**
-     * Get Parameters to define m and ef_construction
-     * @return Parameters to define m and ef_construction
-     */
-    public Map<String, Object> getParameters() {
-        return parameters;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java
deleted file mode 100644
index 61efd6b9a87ae..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index;
-
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexableFieldType;
-import org.apache.lucene.util.BytesRef;
-import org.opensearch.common.io.stream.BytesStreamOutput;
-
-import java.io.IOException;
-
-/**
- * Generic Vector Field defining a correlation vector name, float array.
- *
- * @opensearch.internal
- */
-public class VectorField extends Field {
-
-    /**
-     * Parameterized ctor for VectorField
-     * @param name name of the field
-     * @param value float array value for the field
-     * @param type type of the field
-     */
-    public VectorField(String name, float[] value, IndexableFieldType type) {
-        super(name, new BytesRef(), type);
-        try {
-            final byte[] floatToByte = floatToByteArray(value);
-            this.setBytesValue(floatToByte);
-        } catch (IOException ex) {
-            throw new RuntimeException(ex);
-        }
-    }
-
-    /**
-     * converts float array based vector to byte array.
-     * @param input float array
-     * @return byte array
-     */
-    protected static byte[] floatToByteArray(float[] input) throws IOException {
-        BytesStreamOutput objectStream = new BytesStreamOutput();
-        objectStream.writeFloatArray(input);
-        return objectStream.bytes().toBytesRef().bytes;
-    }
-}
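
Note on the class removed above: VectorField serialized a float vector into the field's binary value via OpenSearch's BytesStreamOutput.writeFloatArray, which also writes a length prefix. The JDK-only sketch below illustrates the general float-to-byte packing idea; it is not byte-compatible with the removed implementation, and the class name is made up.

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public final class VectorBytesSketch {
        private VectorBytesSketch() {}

        // Packs a float vector into a big-endian byte array, four bytes per component.
        static byte[] floatsToBytes(float[] vector) {
            ByteBuffer buffer = ByteBuffer.allocate(vector.length * Float.BYTES);
            for (float component : vector) {
                buffer.putFloat(component);
            }
            return buffer.array();
        }

        // Reverses the packing above.
        static float[] bytesToFloats(byte[] bytes) {
            ByteBuffer buffer = ByteBuffer.wrap(bytes);
            float[] vector = new float[bytes.length / Float.BYTES];
            for (int i = 0; i < vector.length; i++) {
                vector[i] = buffer.getFloat();
            }
            return vector;
        }

        public static void main(String[] args) {
            byte[] packed = floatsToBytes(new float[] { 1.0f, 2.0f, 3.0f, 4.0f });
            System.out.println(Arrays.toString(bytesToFloats(packed))); // [1.0, 2.0, 3.0, 4.0]
        }
    }
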
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java
deleted file mode 100644
index 00b55eb75995c..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.codec;
-
-import org.apache.lucene.codecs.KnnVectorsFormat;
-import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper;
-
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-import java.util.function.BiFunction;
-import java.util.function.Supplier;
-
-/**
- * Class to define the hyper-parameters m and ef_construction for insert and store of correlation vectors into HNSW graphs based lucene index.
- *
- * @opensearch.internal
- */
-public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat {
-    /**
-     * the hyper-parameters for constructing HNSW graphs.
-     * <a href="https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html">HnswGraph.html</a>
-     */
-    public static final String METHOD_PARAMETER_M = "m";
-    /**
-     * the hyper-parameters for constructing HNSW graphs.
-     * <a href="https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html">HnswGraph.html</a>
-     */
-    public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction";
-
-    private final Optional<MapperService> mapperService;
-    private final int defaultMaxConnections;
-    private final int defaultBeamWidth;
-    private final Supplier<KnnVectorsFormat> defaultFormatSupplier;
-    private final BiFunction<Integer, Integer, KnnVectorsFormat> formatSupplier;
-
-    /**
-     * Parameterized ctor of BasePerFieldCorrelationVectorsFormat
-     * @param mapperService mapper service
-     * @param defaultMaxConnections default m
-     * @param defaultBeamWidth default ef_construction
-     * @param defaultFormatSupplier default format supplier
-     * @param formatSupplier format supplier
-     */
-    public BasePerFieldCorrelationVectorsFormat(
-        Optional<MapperService> mapperService,
-        int defaultMaxConnections,
-        int defaultBeamWidth,
-        Supplier<KnnVectorsFormat> defaultFormatSupplier,
-        BiFunction<Integer, Integer, KnnVectorsFormat> formatSupplier
-    ) {
-        this.mapperService = mapperService;
-        this.defaultMaxConnections = defaultMaxConnections;
-        this.defaultBeamWidth = defaultBeamWidth;
-        this.defaultFormatSupplier = defaultFormatSupplier;
-        this.formatSupplier = formatSupplier;
-    }
-
-    @Override
-    public KnnVectorsFormat getKnnVectorsFormatForField(String field) {
-        if (!isCorrelationVectorFieldType(field)) {
-            return defaultFormatSupplier.get();
-        }
-
-        var type = (CorrelationVectorFieldMapper.CorrelationVectorFieldType) mapperService.orElseThrow(
-            () -> new IllegalArgumentException(
-                String.format(Locale.getDefault(), "Cannot read field type for field [%s] because mapper service is not available", field)
-            )
-        ).fieldType(field);
-
-        var params = type.getCorrelationParams().getParameters();
-        int maxConnections = getMaxConnections(params);
-        int beamWidth = getBeamWidth(params);
-
-        return formatSupplier.apply(maxConnections, beamWidth);
-    }
-
-    private boolean isCorrelationVectorFieldType(final String field) {
-        return mapperService.isPresent()
-            && mapperService.get().fieldType(field) instanceof CorrelationVectorFieldMapper.CorrelationVectorFieldType;
-    }
-
-    private int getMaxConnections(final Map<String, Object> params) {
-        if (params != null && params.containsKey(METHOD_PARAMETER_M)) {
-            return (int) params.get(METHOD_PARAMETER_M);
-        }
-        return defaultMaxConnections;
-    }
-
-    private int getBeamWidth(final Map<String, Object> params) {
-        if (params != null && params.containsKey(METHOD_PARAMETER_EF_CONSTRUCTION)) {
-            return (int) params.get(METHOD_PARAMETER_EF_CONSTRUCTION);
-        }
-        return defaultBeamWidth;
-    }
-}
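
Note on the class removed above: BasePerFieldCorrelationVectorsFormat resolved the m and ef_construction hyper-parameters per field from the mapper service and otherwise fell back to a default KnnVectorsFormat. A simplified, Lucene-only sketch of that per-field dispatch is below; the hard-coded field name and parameter values are assumptions for illustration rather than the plugin's actual mapper-service lookup.

    import org.apache.lucene.codecs.KnnVectorsFormat;
    import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat;
    import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat;

    import java.util.Map;

    public class PerFieldHnswFormatSketch extends PerFieldKnnVectorsFormat {

        // field name -> { m, ef_construction }; illustrative values only
        private final Map<String, int[]> overrides = Map.of("lucene-field", new int[] { 16, 128 });

        @Override
        public KnnVectorsFormat getKnnVectorsFormatForField(String field) {
            int[] params = overrides.get(field);
            if (params == null) {
                // Unconfigured fields keep Lucene's default HNSW settings.
                return new Lucene99HnswVectorsFormat();
            }
            return new Lucene99HnswVectorsFormat(params[0], params[1]);
        }
    }
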
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java
deleted file mode 100644
index 09d5e1d2c19e3..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.codec;
-
-import org.apache.lucene.codecs.Codec;
-import org.opensearch.index.codec.CodecService;
-import org.opensearch.index.codec.CodecServiceConfig;
-import org.opensearch.index.mapper.MapperService;
-
-/**
- * custom Correlation Codec Service
- *
- * @opensearch.internal
- */
-public class CorrelationCodecService extends CodecService {
-
-    private final MapperService mapperService;
-
-    /**
-     * Parameterized ctor for CorrelationCodecService
-     * @param codecServiceConfig Generic codec service config
-     */
-    public CorrelationCodecService(CodecServiceConfig codecServiceConfig) {
-        super(codecServiceConfig.getMapperService(), codecServiceConfig.getIndexSettings(), codecServiceConfig.getLogger());
-        mapperService = codecServiceConfig.getMapperService();
-    }
-
-    @Override
-    public Codec codec(String name) {
-        return CorrelationCodecVersion.current().getCorrelationCodecSupplier().apply(super.codec(name), mapperService);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java
deleted file mode 100644
index 9dbb695f14b78..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.codec;
-
-import org.apache.lucene.backward_codecs.lucene99.Lucene99Codec;
-import org.apache.lucene.codecs.Codec;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec;
-import org.opensearch.plugin.correlation.core.index.codec.correlation990.PerFieldCorrelationVectorsFormat;
-
-import java.util.Optional;
-import java.util.function.BiFunction;
-import java.util.function.Supplier;
-
-/**
- * CorrelationCodecVersion enum
- *
- * @opensearch.internal
- */
-public enum CorrelationCodecVersion {
-    V_9_9_0(
-        "CorrelationCodec",
-        new Lucene99Codec(),
-        new PerFieldCorrelationVectorsFormat(Optional.empty()),
-        (userCodec, mapperService) -> new CorrelationCodec(userCodec, new PerFieldCorrelationVectorsFormat(Optional.of(mapperService))),
-        CorrelationCodec::new
-    );
-
-    private static final CorrelationCodecVersion CURRENT = V_9_9_0;
-    private final String codecName;
-    private final Codec defaultCodecDelegate;
-    private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat;
-    private final BiFunction<Codec, MapperService, Codec> correlationCodecSupplier;
-    private final Supplier<Codec> defaultCorrelationCodecSupplier;
-
-    CorrelationCodecVersion(
-        String codecName,
-        Codec defaultCodecDelegate,
-        PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat,
-        BiFunction<Codec, MapperService, Codec> correlationCodecSupplier,
-        Supplier<Codec> defaultCorrelationCodecSupplier
-    ) {
-        this.codecName = codecName;
-        this.defaultCodecDelegate = defaultCodecDelegate;
-        this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat;
-        this.correlationCodecSupplier = correlationCodecSupplier;
-        this.defaultCorrelationCodecSupplier = defaultCorrelationCodecSupplier;
-    }
-
-    /**
-     * get codec name
-     * @return codec name
-     */
-    public String getCodecName() {
-        return codecName;
-    }
-
-    /**
-     * get default codec delegate
-     * @return default codec delegate
-     */
-    public Codec getDefaultCodecDelegate() {
-        return defaultCodecDelegate;
-    }
-
-    /**
-     * get correlation vectors format
-     * @return correlation vectors format
-     */
-    public PerFieldCorrelationVectorsFormat getPerFieldCorrelationVectorsFormat() {
-        return perFieldCorrelationVectorsFormat;
-    }
-
-    /**
-     * get correlation codec supplier
-     * @return correlation codec supplier
-     */
-    public BiFunction<Codec, MapperService, Codec> getCorrelationCodecSupplier() {
-        return correlationCodecSupplier;
-    }
-
-    /**
-     * get default correlation codec supplier
-     * @return default correlation codec supplier
-     */
-    public Supplier<Codec> getDefaultCorrelationCodecSupplier() {
-        return defaultCorrelationCodecSupplier;
-    }
-
-    /**
-     * static method to get correlation codec version
-     * @return correlation codec version
-     */
-    public static final CorrelationCodecVersion current() {
-        return CURRENT;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java
deleted file mode 100644
index 022972e2e06c3..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.codec.correlation990;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.FilterCodec;
-import org.apache.lucene.codecs.KnnVectorsFormat;
-import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion;
-
-/**
- * Correlation Codec class
- *
- * @opensearch.internal
- */
-public class CorrelationCodec extends FilterCodec {
-    private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_9_0;
-    private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat;
-
-    /**
-     * ctor for CorrelationCodec
-     */
-    public CorrelationCodec() {
-        this(VERSION.getDefaultCodecDelegate(), VERSION.getPerFieldCorrelationVectorsFormat());
-    }
-
-    /**
-     * Parameterized ctor for CorrelationCodec
-     * @param delegate codec delegate
-     * @param perFieldCorrelationVectorsFormat correlation vectors format
-     */
-    public CorrelationCodec(Codec delegate, PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat) {
-        super(VERSION.getCodecName(), delegate);
-        this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat;
-    }
-
-    @Override
-    public KnnVectorsFormat knnVectorsFormat() {
-        return perFieldCorrelationVectorsFormat;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java
deleted file mode 100644
index 89cc0b614a1a5..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.codec.correlation990;
-
-import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat;
-
-import java.util.Optional;
-
-/**
- * Class defining the hyper-parameters m and ef_construction used when inserting and storing correlation vectors in HNSW graph-based Lucene indexes.
- */
-public class PerFieldCorrelationVectorsFormat extends BasePerFieldCorrelationVectorsFormat {
-
-    /**
-     * Parameterized ctor for PerFieldCorrelationVectorsFormat
-     * @param mapperService mapper service
-     */
-    public PerFieldCorrelationVectorsFormat(final Optional<MapperService> mapperService) {
-        super(
-            mapperService,
-            Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN,
-            Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH,
-            Lucene99HnswVectorsFormat::new,
-            Lucene99HnswVectorsFormat::new
-        );
-    }
-}
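
The per-field format above wires Lucene's default HNSW hyper-parameters into BasePerFieldCorrelationVectorsFormat: DEFAULT_MAX_CONN corresponds to the HNSW "m" parameter and DEFAULT_BEAM_WIDTH to "ef_construction". A minimal sketch constructing the underlying Lucene format with those parameters made explicit (the class name is illustrative):

import org.apache.lucene.codecs.KnnVectorsFormat;
import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat;

public final class HnswFormatExample {
    private HnswFormatExample() {}

    // Lucene99HnswVectorsFormat(maxConn, beamWidth): maxConn is the HNSW "m" parameter,
    // beamWidth is "ef_construction".
    public static KnnVectorsFormat withExplicitParams() {
        return new Lucene99HnswVectorsFormat(
            Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN,
            Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH
        );
    }
}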
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java
deleted file mode 100644
index fc2a9de58a73a..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * custom Lucene 9.9 codec package for events-correlation-engine
- */
-package org.opensearch.plugin.correlation.core.index.codec.correlation990;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java
deleted file mode 100644
index 862b7cd253f04..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * custom codec package for events-correlation-engine
- */
-package org.opensearch.plugin.correlation.core.index.codec;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java
deleted file mode 100644
index 18c9dd222e2cf..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.mapper;
-
-import org.apache.lucene.codecs.KnnVectorsFormat;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.KnnFloatVectorField;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.opensearch.common.Explicit;
-import org.opensearch.index.mapper.FieldMapper;
-import org.opensearch.index.mapper.ParseContext;
-import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext;
-import org.opensearch.plugin.correlation.core.index.VectorField;
-
-import java.io.IOException;
-import java.util.Locale;
-import java.util.Optional;
-
-/**
- * Field mapper for the correlation vector type
- *
- * @opensearch.internal
- */
-public class CorrelationVectorFieldMapper extends VectorFieldMapper {
-
-    private static final int LUCENE_MAX_DIMENSION = KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS;
-
-    private final FieldType vectorFieldType;
-
-    /**
-     * Parameterized ctor for CorrelationVectorFieldMapper
-     * @param input Object containing name of the field, type and other details.
-     */
-    public CorrelationVectorFieldMapper(final CreateLuceneFieldMapperInput input) {
-        super(
-            input.getName(),
-            input.getMappedFieldType(),
-            input.getMultiFields(),
-            input.getCopyTo(),
-            input.getIgnoreMalformed(),
-            input.isStored(),
-            input.isHasDocValues()
-        );
-
-        this.correlationParams = input.getCorrelationParams();
-        final VectorSimilarityFunction vectorSimilarityFunction = this.correlationParams.getSimilarityFunction();
-
-        final int dimension = input.getMappedFieldType().getDimension();
-        if (dimension > LUCENE_MAX_DIMENSION) {
-            throw new IllegalArgumentException(
-                String.format(
-                    Locale.ROOT,
-                    "Dimension value cannot be greater than [%s] but got [%s] for vector [%s]",
-                    LUCENE_MAX_DIMENSION,
-                    dimension,
-                    input.getName()
-                )
-            );
-        }
-
-        this.fieldType = KnnFloatVectorField.createFieldType(dimension, vectorSimilarityFunction);
-
-        if (this.hasDocValues) {
-            this.vectorFieldType = buildDocValuesFieldType();
-        } else {
-            this.vectorFieldType = null;
-        }
-    }
-
-    private static FieldType buildDocValuesFieldType() {
-        FieldType field = new FieldType();
-        field.setDocValuesType(DocValuesType.BINARY);
-        field.freeze();
-        return field;
-    }
-
-    @Override
-    protected void parseCreateField(ParseContext context, int dimension) throws IOException {
-        Optional<float[]> arrayOptional = getFloatsFromContext(context, dimension);
-
-        if (arrayOptional.isEmpty()) {
-            return;
-        }
-        final float[] array = arrayOptional.get();
-
-        KnnFloatVectorField point = new KnnFloatVectorField(name(), array, fieldType);
-
-        context.doc().add(point);
-        if (fieldType.stored()) {
-            context.doc().add(new StoredField(name(), point.toString()));
-        }
-        if (hasDocValues && vectorFieldType != null) {
-            context.doc().add(new VectorField(name(), array, vectorFieldType));
-        }
-        context.path().remove();
-    }
-
-    static class CreateLuceneFieldMapperInput {
-        String name;
-
-        CorrelationVectorFieldType mappedFieldType;
-
-        FieldMapper.MultiFields multiFields;
-
-        FieldMapper.CopyTo copyTo;
-
-        Explicit<Boolean> ignoreMalformed;
-        boolean stored;
-        boolean hasDocValues;
-
-        CorrelationParamsContext correlationParams;
-
-        public CreateLuceneFieldMapperInput(
-            String name,
-            CorrelationVectorFieldType mappedFieldType,
-            FieldMapper.MultiFields multiFields,
-            FieldMapper.CopyTo copyTo,
-            Explicit<Boolean> ignoreMalformed,
-            boolean stored,
-            boolean hasDocValues,
-            CorrelationParamsContext correlationParams
-        ) {
-            this.name = name;
-            this.mappedFieldType = mappedFieldType;
-            this.multiFields = multiFields;
-            this.copyTo = copyTo;
-            this.ignoreMalformed = ignoreMalformed;
-            this.stored = stored;
-            this.hasDocValues = hasDocValues;
-            this.correlationParams = correlationParams;
-        }
-
-        public String getName() {
-            return name;
-        }
-
-        public CorrelationVectorFieldType getMappedFieldType() {
-            return mappedFieldType;
-        }
-
-        public FieldMapper.MultiFields getMultiFields() {
-            return multiFields;
-        }
-
-        public FieldMapper.CopyTo getCopyTo() {
-            return copyTo;
-        }
-
-        public Explicit<Boolean> getIgnoreMalformed() {
-            return ignoreMalformed;
-        }
-
-        public boolean isStored() {
-            return stored;
-        }
-
-        public boolean isHasDocValues() {
-            return hasDocValues;
-        }
-
-        public CorrelationParamsContext getCorrelationParams() {
-            return correlationParams;
-        }
-    }
-}
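
The removed mapper validates the vector dimension against Lucene's per-field maximum before building a KnnFloatVectorField for indexing. A standalone sketch of that guard, assuming Lucene 9.x (the class name, field name, and similarity function below are illustrative):

import org.apache.lucene.codecs.KnnVectorsFormat;
import org.apache.lucene.document.KnnFloatVectorField;
import org.apache.lucene.index.VectorSimilarityFunction;

public final class VectorDimensionGuard {
    private VectorDimensionGuard() {}

    // Validates the dimension and builds a KnnFloatVectorField, mirroring the check in the removed mapper.
    public static KnnFloatVectorField createVectorField(String fieldName, float[] vector) {
        if (vector.length > KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS) {
            throw new IllegalArgumentException(
                "Dimension value cannot be greater than " + KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS
                    + " but got " + vector.length + " for vector " + fieldName
            );
        }
        return new KnnFloatVectorField(fieldName, vector, VectorSimilarityFunction.EUCLIDEAN);
    }
}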
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java
deleted file mode 100644
index 5ac6d92792295..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.mapper;
-
-import org.apache.lucene.search.FieldExistsQuery;
-import org.apache.lucene.search.Query;
-import org.opensearch.common.Explicit;
-import org.opensearch.common.xcontent.support.XContentMapValues;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.index.mapper.FieldMapper;
-import org.opensearch.index.mapper.MappedFieldType;
-import org.opensearch.index.mapper.Mapper;
-import org.opensearch.index.mapper.MapperParsingException;
-import org.opensearch.index.mapper.ParametrizedFieldMapper;
-import org.opensearch.index.mapper.ParseContext;
-import org.opensearch.index.mapper.TextSearchInfo;
-import org.opensearch.index.mapper.ValueFetcher;
-import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.index.query.QueryShardException;
-import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext;
-import org.opensearch.search.lookup.SearchLookup;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Optional;
-
-/**
- * Parameterized field mapper for Correlation Vector type
- *
- * @opensearch.internal
- */
-public abstract class VectorFieldMapper extends ParametrizedFieldMapper {
-
-    /**
-     * name of Correlation Vector type
-     */
-    public static final String CONTENT_TYPE = "correlation_vector";
-    /**
-     * dimension of the correlation vectors
-     */
-    public static final String DIMENSION = "dimension";
-    /**
-     * context e.g. parameters and vector similarity function of Correlation Vector type
-     */
-    public static final String CORRELATION_CONTEXT = "correlation_ctx";
-
-    private static VectorFieldMapper toType(FieldMapper in) {
-        return (VectorFieldMapper) in;
-    }
-
-    /**
-     * definition of VectorFieldMapper.Builder
-     */
-    public static class Builder extends ParametrizedFieldMapper.Builder {
-        protected Boolean ignoreMalformed;
-
-        protected final Parameter<Boolean> stored = Parameter.boolParam("store", false, m -> toType(m).stored, false);
-        protected final Parameter<Boolean> hasDocValues = Parameter.boolParam("doc_values", false, m -> toType(m).hasDocValues, true);
-        protected final Parameter<Integer> dimension = new Parameter<>(DIMENSION, false, () -> -1, (n, c, o) -> {
-            if (o == null) {
-                throw new IllegalArgumentException("Dimension cannot be null");
-            }
-            int value;
-            try {
-                value = XContentMapValues.nodeIntegerValue(o);
-            } catch (Exception ex) {
-                throw new IllegalArgumentException(
-                    String.format(Locale.getDefault(), "Unable to parse [dimension] from provided value [%s] for vector [%s]", o, name)
-                );
-            }
-            if (value <= 0) {
-                throw new IllegalArgumentException(
-                    String.format(Locale.getDefault(), "Dimension value must be greater than 0 for vector: %s", name)
-                );
-            }
-            return value;
-        }, m -> toType(m).dimension);
-
-        protected final Parameter<CorrelationParamsContext> correlationParamsContext = new Parameter<>(
-            CORRELATION_CONTEXT,
-            false,
-            () -> null,
-            (n, c, o) -> CorrelationParamsContext.parse(o),
-            m -> toType(m).correlationParams
-        );
-
-        protected final Parameter<Map<String, String>> meta = Parameter.metaParam();
-
-        /**
-         * Parameterized ctor for VectorFieldMapper.Builder
-         * @param name name
-         */
-        public Builder(String name) {
-            super(name);
-        }
-
-        @Override
-        protected List<Parameter<?>> getParameters() {
-            return Arrays.asList(stored, hasDocValues, dimension, meta, correlationParamsContext);
-        }
-
-        protected Explicit<Boolean> ignoreMalformed(BuilderContext context) {
-            if (ignoreMalformed != null) {
-                return new Explicit<>(ignoreMalformed, true);
-            }
-            if (context.indexSettings() != null) {
-                return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false);
-            }
-            return Defaults.IGNORE_MALFORMED;
-        }
-
-        @Override
-        public ParametrizedFieldMapper build(BuilderContext context) {
-            final CorrelationParamsContext correlationParams = correlationParamsContext.getValue();
-            final MultiFields multiFieldsBuilder = this.multiFieldsBuilder.build(this, context);
-            final CopyTo copyToBuilder = copyTo.build();
-            final Explicit<Boolean> ignoreMalformed = ignoreMalformed(context);
-            final Map<String, String> metaValue = meta.getValue();
-
-            final CorrelationVectorFieldType mappedFieldType = new CorrelationVectorFieldType(
-                buildFullName(context),
-                metaValue,
-                dimension.getValue(),
-                correlationParams
-            );
-
-            CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput createLuceneFieldMapperInput =
-                new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput(
-                    name,
-                    mappedFieldType,
-                    multiFieldsBuilder,
-                    copyToBuilder,
-                    ignoreMalformed,
-                    stored.get(),
-                    hasDocValues.get(),
-                    correlationParams
-                );
-            return new CorrelationVectorFieldMapper(createLuceneFieldMapperInput);
-        }
-    }
-
-    /**
-     * definition of VectorFieldMapper.TypeParser
-     */
-    public static class TypeParser implements Mapper.TypeParser {
-
-        /**
-         * default constructor of VectorFieldMapper.TypeParser
-         */
-        public TypeParser() {}
-
-        @Override
-        public Mapper.Builder<?> parse(String name, Map<String, Object> node, ParserContext context) throws MapperParsingException {
-            Builder builder = new VectorFieldMapper.Builder(name);
-            builder.parse(name, context, node);
-
-            if (builder.dimension.getValue() == -1) {
-                throw new IllegalArgumentException(String.format(Locale.getDefault(), "Dimension value missing for vector: %s", name));
-            }
-            return builder;
-        }
-    }
-
-    /**
-     * definition of VectorFieldMapper.CorrelationVectorFieldType
-     */
-    public static class CorrelationVectorFieldType extends MappedFieldType {
-        int dimension;
-        CorrelationParamsContext correlationParams;
-
-        /**
-         * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType
-         * @param name name of the field
-         * @param meta meta of the field
-         * @param dimension dimension of the field
-         */
-        public CorrelationVectorFieldType(String name, Map<String, String> meta, int dimension) {
-            this(name, meta, dimension, null);
-        }
-
-        /**
-         * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType
-         * @param name name of the field
-         * @param meta meta of the field
-         * @param dimension dimension of the field
-         * @param correlationParams correlation params for the field
-         */
-        public CorrelationVectorFieldType(
-            String name,
-            Map<String, String> meta,
-            int dimension,
-            CorrelationParamsContext correlationParams
-        ) {
-            super(name, false, false, true, TextSearchInfo.NONE, meta);
-            this.dimension = dimension;
-            this.correlationParams = correlationParams;
-        }
-
-        @Override
-        public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String s) {
-            throw new UnsupportedOperationException("Correlation vectors do not support field search");
-        }
-
-        @Override
-        public String typeName() {
-            return CONTENT_TYPE;
-        }
-
-        @Override
-        public Query existsQuery(QueryShardContext context) {
-            return new FieldExistsQuery(name());
-        }
-
-        @Override
-        public Query termQuery(Object o, QueryShardContext context) {
-            throw new QueryShardException(
-                context,
-                String.format(
-                    Locale.getDefault(),
-                    "Correlation vector do not support exact searching, use Correlation queries instead: [%s]",
-                    name()
-                )
-            );
-        }
-
-        /**
-         * get dimension
-         * @return dimension
-         */
-        public int getDimension() {
-            return dimension;
-        }
-
-        /**
-         * get correlation params
-         * @return correlation params
-         */
-        public CorrelationParamsContext getCorrelationParams() {
-            return correlationParams;
-        }
-    }
-
-    protected Explicit<Boolean> ignoreMalformed;
-    protected boolean stored;
-    protected boolean hasDocValues;
-    protected Integer dimension;
-    protected CorrelationParamsContext correlationParams;
-
-    /**
-     * Parameterized ctor for VectorFieldMapper
-     * @param simpleName name of field
-     * @param mappedFieldType field type of field
-     * @param multiFields multi fields
-     * @param copyTo copy to
-     * @param ignoreMalformed ignore malformed
-     * @param stored stored field
-     * @param hasDocValues has doc values
-     */
-    public VectorFieldMapper(
-        String simpleName,
-        CorrelationVectorFieldType mappedFieldType,
-        FieldMapper.MultiFields multiFields,
-        FieldMapper.CopyTo copyTo,
-        Explicit<Boolean> ignoreMalformed,
-        boolean stored,
-        boolean hasDocValues
-    ) {
-        super(simpleName, mappedFieldType, multiFields, copyTo);
-        this.ignoreMalformed = ignoreMalformed;
-        this.stored = stored;
-        this.hasDocValues = hasDocValues;
-        this.dimension = mappedFieldType.getDimension();
-    }
-
-    @Override
-    protected VectorFieldMapper clone() {
-        return (VectorFieldMapper) super.clone();
-    }
-
-    @Override
-    protected String contentType() {
-        return CONTENT_TYPE;
-    }
-
-    @Override
-    protected void parseCreateField(ParseContext parseContext) throws IOException {
-        parseCreateField(parseContext, fieldType().getDimension());
-    }
-
-    protected abstract void parseCreateField(ParseContext parseContext, int dimension) throws IOException;
-
-    Optional<float[]> getFloatsFromContext(ParseContext context, int dimension) throws IOException {
-        context.path().add(simpleName());
-
-        List<Float> vector = new ArrayList<>();
-        XContentParser.Token token = context.parser().currentToken();
-        float value;
-        if (token == XContentParser.Token.START_ARRAY) {
-            token = context.parser().nextToken();
-            while (token != XContentParser.Token.END_ARRAY) {
-                value = context.parser().floatValue();
-
-                if (Float.isNaN(value)) {
-                    throw new IllegalArgumentException("Correlation vector values cannot be NaN");
-                }
-
-                if (Float.isInfinite(value)) {
-                    throw new IllegalArgumentException("Correlation vector values cannot be infinity");
-                }
-                vector.add(value);
-                token = context.parser().nextToken();
-            }
-        } else if (token == XContentParser.Token.VALUE_NUMBER) {
-            value = context.parser().floatValue();
-            if (Float.isNaN(value)) {
-                throw new IllegalArgumentException("Correlation vector values cannot be NaN");
-            }
-
-            if (Float.isInfinite(value)) {
-                throw new IllegalArgumentException("Correlation vector values cannot be infinity");
-            }
-            vector.add(value);
-            context.parser().nextToken();
-        } else if (token == XContentParser.Token.VALUE_NULL) {
-            context.path().remove();
-            return Optional.empty();
-        }
-
-        if (dimension != vector.size()) {
-            String errorMessage = String.format(
-                Locale.ROOT,
-                "Vector dimension mismatch. Expected: %d, Given: %d",
-                dimension,
-                vector.size()
-            );
-            throw new IllegalArgumentException(errorMessage);
-        }
-
-        float[] array = new float[vector.size()];
-        int i = 0;
-        for (Float f : vector) {
-            array[i++] = f;
-        }
-        return Optional.of(array);
-    }
-
-    @Override
-    protected boolean docValuesByDefault() {
-        return true;
-    }
-
-    @Override
-    public ParametrizedFieldMapper.Builder getMergeBuilder() {
-        return new VectorFieldMapper.Builder(simpleName()).init(this);
-    }
-
-    @Override
-    public boolean parsesArrayValue() {
-        return true;
-    }
-
-    @Override
-    public CorrelationVectorFieldType fieldType() {
-        return (CorrelationVectorFieldType) super.fieldType();
-    }
-
-    @Override
-    protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
-        super.doXContentBody(builder, includeDefaults, params);
-        if (includeDefaults || ignoreMalformed.explicit()) {
-            builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value());
-        }
-    }
-
-    /**
-     * Class for constants used in parent class VectorFieldMapper
-     */
-    public static class Names {
-        public static final String IGNORE_MALFORMED = "ignore_malformed";
-    }
-
-    /**
-     * Class for constants used in parent class VectorFieldMapper
-     */
-    public static class Defaults {
-        public static final Explicit<Boolean> IGNORE_MALFORMED = new Explicit<>(false, false);
-    }
-}
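
getFloatsFromContext() above reads the vector values from the document source, rejects NaN and infinite values, and enforces an exact match with the mapped dimension. A standalone sketch of the same validation over an already-parsed list (the class and method names are illustrative):

import java.util.List;
import java.util.Locale;

public final class VectorValueValidation {
    private VectorValueValidation() {}

    // Mirrors the checks in the removed getFloatsFromContext: finite values only
    // and an exact dimension match.
    public static float[] toValidatedArray(List<Float> values, int expectedDimension) {
        if (values.size() != expectedDimension) {
            throw new IllegalArgumentException(String.format(
                Locale.ROOT, "Vector dimension mismatch. Expected: %d, Given: %d",
                expectedDimension, values.size()));
        }
        float[] array = new float[values.size()];
        for (int i = 0; i < array.length; i++) {
            float value = values.get(i);
            if (Float.isNaN(value) || Float.isInfinite(value)) {
                throw new IllegalArgumentException("Correlation vector values must be finite");
            }
            array[i] = value;
        }
        return array;
    }
}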
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
deleted file mode 100644
index 4fdc622c3d886..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * correlation field mapper package
- */
-package org.opensearch.plugin.correlation.core.index.mapper;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java
deleted file mode 100644
index cfc0ffdfa81f1..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * package to wrap Lucene KnnFloatVectorField and KnnFloatVectorQuery for OpenSearch events-correlation-engine
- */
-package org.opensearch.plugin.correlation.core.index;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java
deleted file mode 100644
index e95b68e855cca..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.query;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.lucene.search.Query;
-import org.opensearch.core.ParseField;
-import org.opensearch.core.common.ParsingException;
-import org.opensearch.core.common.Strings;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.index.mapper.MappedFieldType;
-import org.opensearch.index.mapper.NumberFieldMapper;
-import org.opensearch.index.query.AbstractQueryBuilder;
-import org.opensearch.index.query.QueryBuilder;
-import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.index.query.WithFieldName;
-import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Locale;
-import java.util.Objects;
-
-/**
- * Constructs a query to get correlated events or documents for a particular event or document.
- *
- * @opensearch.internal
- */
-public class CorrelationQueryBuilder extends AbstractQueryBuilder<CorrelationQueryBuilder> implements WithFieldName {
-
-    private static final Logger log = LogManager.getLogger(CorrelationQueryBuilder.class);
-    protected static final ParseField VECTOR_FIELD = new ParseField("vector");
-    protected static final ParseField K_FIELD = new ParseField("k");
-    protected static final ParseField FILTER_FIELD = new ParseField("filter");
-    /**
-     * max number of neighbors that can be retrieved.
-     */
-    public static int K_MAX = 10000;
-
-    /**
-     * name of the query
-     */
-    public static final ParseField NAME_FIELD = new ParseField("correlation");
-
-    private String fieldName;
-    private float[] vector;
-    private int k = 0;
-    private double boost;
-    private QueryBuilder filter;
-
-    private CorrelationQueryBuilder() {}
-
-    /**
-     * parameterized ctor for CorrelationQueryBuilder
-     * @param fieldName field name for query
-     * @param vector query vector
-     * @param k number of nearby neighbors
-     */
-    public CorrelationQueryBuilder(String fieldName, float[] vector, int k) {
-        this(fieldName, vector, k, null);
-    }
-
-    /**
-     * parameterized ctor for CorrelationQueryBuilder
-     * @param fieldName field name for query
-     * @param vector query vector
-     * @param k number of nearby neighbors
-     * @param filter optional filter query
-     */
-    public CorrelationQueryBuilder(String fieldName, float[] vector, int k, QueryBuilder filter) {
-        if (Strings.isNullOrEmpty(fieldName)) {
-            throw new IllegalArgumentException(
-                String.format(Locale.getDefault(), "[%s] requires fieldName", NAME_FIELD.getPreferredName())
-            );
-        }
-        if (vector == null) {
-            throw new IllegalArgumentException(
-                String.format(Locale.getDefault(), "[%s] requires query vector", NAME_FIELD.getPreferredName())
-            );
-        }
-        if (vector.length == 0) {
-            throw new IllegalArgumentException(
-                String.format(Locale.getDefault(), "[%s] query vector is empty", NAME_FIELD.getPreferredName())
-            );
-        }
-        if (k <= 0) {
-            throw new IllegalArgumentException(String.format(Locale.getDefault(), "[%s] requires k > 0", NAME_FIELD.getPreferredName()));
-        }
-        if (k > K_MAX) {
-            throw new IllegalArgumentException(String.format(Locale.getDefault(), "[%s] requires k <= ", K_MAX));
-        }
-
-        this.fieldName = fieldName;
-        this.vector = vector;
-        this.k = k;
-        this.filter = filter;
-    }
-
-    /**
-     * parameterized ctor for CorrelationQueryBuilder
-     * @param sin StreamInput
-     * @throws IOException IOException
-     */
-    public CorrelationQueryBuilder(StreamInput sin) throws IOException {
-        super(sin);
-        this.fieldName = sin.readString();
-        this.vector = sin.readFloatArray();
-        this.k = sin.readInt();
-        this.filter = sin.readOptionalNamedWriteable(QueryBuilder.class);
-    }
-
-    private static float[] objectsToFloats(List<Object> objs) {
-        float[] vector = new float[objs.size()];
-        for (int i = 0; i < objs.size(); ++i) {
-            vector[i] = ((Number) objs.get(i)).floatValue();
-        }
-        return vector;
-    }
-
-    /**
-     * parse into CorrelationQueryBuilder
-     * @param xcp XContentParser
-     * @return CorrelationQueryBuilder
-     */
-    public static CorrelationQueryBuilder parse(XContentParser xcp) throws IOException {
-        String fieldName = null;
-        List<Object> vector = null;
-        float boost = AbstractQueryBuilder.DEFAULT_BOOST;
-
-        int k = 0;
-        QueryBuilder filter = null;
-        String queryName = null;
-        String currentFieldName = null;
-        XContentParser.Token token;
-        while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = xcp.currentName();
-            } else if (token == XContentParser.Token.START_OBJECT) {
-                throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, currentFieldName);
-                fieldName = currentFieldName;
-                while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) {
-                    if (token == XContentParser.Token.FIELD_NAME) {
-                        currentFieldName = xcp.currentName();
-                    } else if (token.isValue() || token == XContentParser.Token.START_ARRAY) {
-                        if (VECTOR_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) {
-                            vector = xcp.list();
-                        } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) {
-                            boost = xcp.floatValue();
-                        } else if (K_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) {
-                            k = (Integer) NumberFieldMapper.NumberType.INTEGER.parse(xcp.objectBytes(), false);
-                        } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) {
-                            queryName = xcp.text();
-                        } else {
-                            throw new ParsingException(
-                                xcp.getTokenLocation(),
-                                "[" + NAME_FIELD.getPreferredName() + "] query does not support [" + currentFieldName + "]"
-                            );
-                        }
-                    } else if (token == XContentParser.Token.START_OBJECT) {
-                        String tokenName = xcp.currentName();
-                        if (FILTER_FIELD.getPreferredName().equals(tokenName)) {
-                            filter = parseInnerQueryBuilder(xcp);
-                        } else {
-                            throw new ParsingException(
-                                xcp.getTokenLocation(),
-                                "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "]"
-                            );
-                        }
-                    } else {
-                        throw new ParsingException(
-                            xcp.getTokenLocation(),
-                            "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "] after [" + currentFieldName + "]"
-                        );
-                    }
-                }
-            } else {
-                throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, xcp.currentName());
-                fieldName = xcp.currentName();
-                vector = xcp.list();
-            }
-        }
-
-        assert vector != null;
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(fieldName, objectsToFloats(vector), k, filter);
-        correlationQueryBuilder.queryName(queryName);
-        correlationQueryBuilder.boost(boost);
-        return correlationQueryBuilder;
-    }
-
-    public void setFieldName(String fieldName) {
-        this.fieldName = fieldName;
-    }
-
-    /**
-     * get field name
-     * @return field name
-     */
-    @Override
-    public String fieldName() {
-        return fieldName;
-    }
-
-    public void setVector(float[] vector) {
-        this.vector = vector;
-    }
-
-    /**
-     * get query vector
-     * @return query vector
-     */
-    public Object vector() {
-        return vector;
-    }
-
-    public void setK(int k) {
-        this.k = k;
-    }
-
-    /**
-     * get number of nearby neighbors
-     * @return number of nearby neighbors
-     */
-    public int getK() {
-        return k;
-    }
-
-    public void setBoost(double boost) {
-        this.boost = boost;
-    }
-
-    /**
-     * get boost
-     * @return boost
-     */
-    public double getBoost() {
-        return boost;
-    }
-
-    public void setFilter(QueryBuilder filter) {
-        this.filter = filter;
-    }
-
-    /**
-     * get optional filter
-     * @return optional filter
-     */
-    public QueryBuilder getFilter() {
-        return filter;
-    }
-
-    @Override
-    protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeString(fieldName);
-        out.writeFloatArray(vector);
-        out.writeInt(k);
-        out.writeOptionalNamedWriteable(filter);
-    }
-
-    @Override
-    public void doXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(fieldName);
-
-        builder.field(VECTOR_FIELD.getPreferredName(), vector);
-        builder.field(K_FIELD.getPreferredName(), k);
-        if (filter != null) {
-            builder.field(FILTER_FIELD.getPreferredName(), filter);
-        }
-        printBoostAndQueryName(builder);
-        builder.endObject();
-    }
-
-    @Override
-    protected Query doToQuery(QueryShardContext context) throws IOException {
-        MappedFieldType mappedFieldType = context.fieldMapper(fieldName);
-
-        if (!(mappedFieldType instanceof VectorFieldMapper.CorrelationVectorFieldType)) {
-            throw new IllegalArgumentException(String.format(Locale.getDefault(), "Field '%s' is not knn_vector type.", this.fieldName));
-        }
-
-        VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType =
-            (VectorFieldMapper.CorrelationVectorFieldType) mappedFieldType;
-        int fieldDimension = correlationVectorFieldType.getDimension();
-
-        if (fieldDimension != vector.length) {
-            throw new IllegalArgumentException(
-                String.format(
-                    Locale.getDefault(),
-                    "Query vector has invalid dimension: %d. Dimension should be: %d",
-                    vector.length,
-                    fieldDimension
-                )
-            );
-        }
-
-        String indexName = context.index().getName();
-        CorrelationQueryFactory.CreateQueryRequest createQueryRequest = new CorrelationQueryFactory.CreateQueryRequest(
-            indexName,
-            this.fieldName,
-            this.vector,
-            this.k,
-            this.filter,
-            context
-        );
-        return CorrelationQueryFactory.create(createQueryRequest);
-    }
-
-    @Override
-    protected boolean doEquals(CorrelationQueryBuilder other) {
-        return Objects.equals(fieldName, other.fieldName) && Arrays.equals(vector, other.vector) && Objects.equals(k, other.k);
-    }
-
-    @Override
-    protected int doHashCode() {
-        return Objects.hash(fieldName, vector, k);
-    }
-
-    @Override
-    public String getWriteableName() {
-        return NAME_FIELD.getPreferredName();
-    }
-}
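
For reference, a usage sketch of the removed query builder: a correlation query over an example field for the 5 nearest neighbours of an example vector, restricted by an optional term filter (all concrete values are illustrative):

import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryBuilder;

public class CorrelationQueryExample {
    // Builds a correlation query with an optional term filter; every value here is an example.
    public static QueryBuilder exampleQuery() {
        return new CorrelationQueryBuilder(
            "correlation_vector_field",
            new float[] { 1.0f, 2.0f, 3.0f },
            5,
            QueryBuilders.termQuery("category", "s3")
        );
    }
}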
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java
deleted file mode 100644
index d5db299bfa3a5..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.query;
-
-import org.apache.lucene.search.KnnFloatVectorQuery;
-import org.apache.lucene.search.Query;
-import org.opensearch.index.query.QueryBuilder;
-import org.opensearch.index.query.QueryShardContext;
-
-import java.io.IOException;
-import java.util.Optional;
-
-/**
- * CorrelationQueryFactory util class is used to construct a Lucene KnnFloatVectorQuery.
- *
- * @opensearch.internal
- */
-public class CorrelationQueryFactory {
-
-    /**
-     * static method which takes input params to construct a Lucene KnnFloatVectorQuery.
-     * @param createQueryRequest object parameter containing inputs for constructing Lucene KnnFloatVectorQuery.
-     * @return generic Lucene Query object
-     */
-    public static Query create(CreateQueryRequest createQueryRequest) {
-        final String indexName = createQueryRequest.getIndexName();
-        final String fieldName = createQueryRequest.getFieldName();
-        final int k = createQueryRequest.getK();
-        final float[] vector = createQueryRequest.getVector();
-
-        if (createQueryRequest.getFilter().isPresent()) {
-            final QueryShardContext context = createQueryRequest.getContext()
-                .orElseThrow(() -> new RuntimeException("Shard context cannot be null"));
-
-            try {
-                final Query filterQuery = createQueryRequest.getFilter().get().toQuery(context);
-                return new KnnFloatVectorQuery(fieldName, vector, k, filterQuery);
-            } catch (IOException ex) {
-                throw new RuntimeException("Cannot create knn query with filter", ex);
-            }
-        }
-        return new KnnFloatVectorQuery(fieldName, vector, k);
-    }
-
-    /**
-     * class containing params to construct a Lucene KnnFloatVectorQuery.
-     *
-     * @opensearch.internal
-     */
-    public static class CreateQueryRequest {
-        private String indexName;
-
-        private String fieldName;
-
-        private float[] vector;
-
-        private int k;
-
-        private QueryBuilder filter;
-
-        private QueryShardContext context;
-
-        /**
-         * Parameterized ctor for CreateQueryRequest
-         * @param indexName index name
-         * @param fieldName field name
-         * @param vector query vector
-         * @param k number of nearby neighbors
-         * @param filter additional filter query
-         * @param context QueryShardContext
-         */
-        public CreateQueryRequest(
-            String indexName,
-            String fieldName,
-            float[] vector,
-            int k,
-            QueryBuilder filter,
-            QueryShardContext context
-        ) {
-            this.indexName = indexName;
-            this.fieldName = fieldName;
-            this.vector = vector;
-            this.k = k;
-            this.filter = filter;
-            this.context = context;
-        }
-
-        /**
-         * get index name
-         * @return index name
-         */
-        public String getIndexName() {
-            return indexName;
-        }
-
-        /**
-         * get field name
-         * @return field name
-         */
-        public String getFieldName() {
-            return fieldName;
-        }
-
-        /**
-         * get vector
-         * @return vector
-         */
-        public float[] getVector() {
-            return vector;
-        }
-
-        /**
-         * get number of nearby neighbors
-         * @return number of nearby neighbors
-         */
-        public int getK() {
-            return k;
-        }
-
-        /**
-         * get optional filter query
-         * @return optional filter query
-         */
-        public Optional<QueryBuilder> getFilter() {
-            return Optional.ofNullable(filter);
-        }
-
-        /**
-         * get optional query shard context
-         * @return optional query shard context
-         */
-        public Optional<QueryShardContext> getContext() {
-            return Optional.ofNullable(context);
-        }
-    }
-}
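
CorrelationQueryFactory reduces every request to a Lucene KnnFloatVectorQuery, with or without a pre-filter over the candidate documents. A minimal sketch of that construction, assuming Lucene 9.x (the field, vector, k, and filter below are illustrative):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.KnnFloatVectorQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public final class KnnQueryExample {
    private KnnQueryExample() {}

    // Same shape as the unfiltered query produced by the removed factory.
    public static Query unfiltered(String field, float[] vector, int k) {
        return new KnnFloatVectorQuery(field, vector, k);
    }

    // Same shape as the filtered variant: candidates are restricted by the filter query.
    public static Query filtered(String field, float[] vector, int k) {
        Query filter = new TermQuery(new Term("category", "s3")); // illustrative filter
        return new KnnFloatVectorQuery(field, vector, k, filter);
    }
}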
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java
deleted file mode 100644
index 2cf5db786a60f..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * correlation query builder package
- */
-package org.opensearch.plugin.correlation.core.index.query;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java
deleted file mode 100644
index 82be787af5a72..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * base package of events-correlation-engine
- */
-package org.opensearch.plugin.correlation;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java
deleted file mode 100644
index ab6f05ec0e6a3..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.action;
-
-import org.opensearch.action.ActionType;
-
-/**
- * Transport Action for indexing correlation rules.
- *
- * @opensearch.internal
- */
-public class IndexCorrelationRuleAction extends ActionType<IndexCorrelationRuleResponse> {
-
-    /**
-     * Instance of IndexCorrelationRuleAction
-     */
-    public static final IndexCorrelationRuleAction INSTANCE = new IndexCorrelationRuleAction();
-    /**
-     * Name of IndexCorrelationRuleAction
-     */
-    public static final String NAME = "cluster:admin/correlation/rules";
-
-    private IndexCorrelationRuleAction() {
-        super(NAME, IndexCorrelationRuleResponse::new);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java
deleted file mode 100644
index 3fe25d144059d..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.action;
-
-import org.opensearch.action.ActionRequest;
-import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.plugin.correlation.rules.model.CorrelationRule;
-import org.opensearch.rest.RestRequest;
-
-import java.io.IOException;
-
-/**
- * A request to index correlation rules.
- *
- * @opensearch.internal
- */
-public class IndexCorrelationRuleRequest extends ActionRequest {
-
-    private String correlationRuleId;
-
-    private CorrelationRule correlationRule;
-
-    private RestRequest.Method method;
-
-    /**
-     * Parameterized ctor for IndexCorrelationRuleRequest
-     * @param correlationRule correlation rule
-     * @param method Rest method of request PUT or POST
-     */
-    public IndexCorrelationRuleRequest(CorrelationRule correlationRule, RestRequest.Method method) {
-        super();
-        this.correlationRuleId = "";
-        this.correlationRule = correlationRule;
-        this.method = method;
-    }
-
-    /**
-     * Parameterized ctor for IndexCorrelationRuleRequest
-     * @param correlationRuleId correlation rule id
-     * @param correlationRule correlation rule
-     * @param method Rest method of request PUT or POST
-     */
-    public IndexCorrelationRuleRequest(String correlationRuleId, CorrelationRule correlationRule, RestRequest.Method method) {
-        super();
-        this.correlationRuleId = correlationRuleId;
-        this.correlationRule = correlationRule;
-        this.method = method;
-    }
-
-    /**
-     * StreamInput ctor of IndexCorrelationRuleRequest
-     * @param sin StreamInput
-     * @throws IOException IOException
-     */
-    public IndexCorrelationRuleRequest(StreamInput sin) throws IOException {
-        this(sin.readString(), CorrelationRule.readFrom(sin), sin.readEnum(RestRequest.Method.class));
-    }
-
-    @Override
-    public ActionRequestValidationException validate() {
-        return null;
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeString(correlationRuleId);
-        correlationRule.writeTo(out);
-    }
-
-    /**
-     * get correlation rule id
-     * @return correlation rule id
-     */
-    public String getCorrelationRuleId() {
-        return correlationRuleId;
-    }
-
-    /**
-     * get correlation rule
-     * @return correlation rule
-     */
-    public CorrelationRule getCorrelationRule() {
-        return correlationRule;
-    }
-
-    /**
-     * get Rest method
-     * @return Rest method
-     */
-    public RestRequest.Method getMethod() {
-        return method;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java
deleted file mode 100644
index 8102e6585825e..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.action;
-
-import org.opensearch.core.ParseField;
-import org.opensearch.core.action.ActionResponse;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.core.xcontent.ToXContentObject;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.plugin.correlation.rules.model.CorrelationRule;
-
-import java.io.IOException;
-
-/**
- * Transport Response for indexing correlation rules.
- *
- * @opensearch.internal
- */
-public class IndexCorrelationRuleResponse extends ActionResponse implements ToXContentObject {
-
-    private static final ParseField _ID = new ParseField("_id");
-    private static final ParseField _VERSION = new ParseField("_version");
-
-    private String id;
-
-    private Long version;
-
-    private RestStatus status;
-
-    private CorrelationRule correlationRule;
-
-    /**
-     * Parameterized ctor for IndexCorrelationRuleResponse
-     * @param version version of rule
-     * @param status Rest status of indexing rule
-     * @param correlationRule correlation rule
-     */
-    public IndexCorrelationRuleResponse(String id, Long version, RestStatus status, CorrelationRule correlationRule) {
-        super();
-        this.id = id;
-        this.version = version;
-        this.status = status;
-        this.correlationRule = correlationRule;
-    }
-
-    /**
-     * StreamInput ctor of IndexCorrelationRuleResponse
-     * @param sin StreamInput
-     * @throws IOException IOException
-     */
-    public IndexCorrelationRuleResponse(StreamInput sin) throws IOException {
-        this(sin.readString(), sin.readLong(), sin.readEnum(RestStatus.class), CorrelationRule.readFrom(sin));
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject().field(_ID.getPreferredName(), id).field(_VERSION.getPreferredName(), version);
-
-        builder.field("rule", correlationRule);
-        return builder.endObject();
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeString(id);
-        out.writeLong(version);
-        out.writeEnum(status);
-        correlationRule.writeTo(out);
-    }
-
-    /**
-     * get id
-     * @return id of rule
-     */
-    public String getId() {
-        return id;
-    }
-
-    /**
-     * get status
-     * @return Rest status of indexing rule
-     */
-    public RestStatus getStatus() {
-        return status;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java
deleted file mode 100644
index c01f2936a20ca..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * Transport Actions, Requests and Responses for correlation rules
- */
-package org.opensearch.plugin.correlation.rules.action;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java
deleted file mode 100644
index 3797e0c7043dc..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.model;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.core.ParseField;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.core.xcontent.ObjectParser;
-import org.opensearch.core.xcontent.ToXContentObject;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Correlation Query DSL
- * {
- *   "index": "s3_access_logs",
- *   "query": "aws.cloudtrail.eventName:ReplicateObject",
- *   "timestampField": "@timestamp",
- *   "tags": [
- *     "s3"
- *   ]
- * }
- */
-public class CorrelationQuery implements Writeable, ToXContentObject {
-
-    private static final Logger log = LogManager.getLogger(CorrelationQuery.class);
-    private static final ParseField INDEX_FIELD = new ParseField("index");
-    private static final ParseField QUERY_FIELD = new ParseField("query");
-    private static final ParseField TIMESTAMP_FIELD = new ParseField("timestampField");
-    private static final ParseField TAGS_FIELD = new ParseField("tags");
-    private static final ObjectParser<CorrelationQuery, Void> PARSER = new ObjectParser<>("CorrelationQuery", CorrelationQuery::new);
-
-    static {
-        PARSER.declareString(CorrelationQuery::setIndex, INDEX_FIELD);
-        PARSER.declareString(CorrelationQuery::setQuery, QUERY_FIELD);
-        PARSER.declareStringOrNull(CorrelationQuery::setTimestampField, TIMESTAMP_FIELD);
-        PARSER.declareField((xcp, query, context) -> {
-            List<String> tags = new ArrayList<>();
-            XContentParser.Token currentToken = xcp.currentToken();
-            if (currentToken == XContentParser.Token.START_ARRAY) {
-                while (xcp.nextToken() != XContentParser.Token.END_ARRAY) {
-                    tags.add(xcp.text());
-                }
-            }
-            query.setTags(tags);
-        }, TAGS_FIELD, ObjectParser.ValueType.STRING_ARRAY);
-    }
-
-    private String index;
-
-    private String query;
-
-    private String timestampField;
-
-    private List<String> tags;
-
-    private CorrelationQuery() {
-        this.timestampField = "_timestamp";
-    }
-
-    /**
-     * Parameterized ctor of Correlation Query
-     * @param index event index to correlate
-     * @param query query to filter relevant events for correlations from index
-     * @param timestampField timestamp field in the index
-     * @param tags tags to store additional metadata as part of correlation queries.
-     */
-    public CorrelationQuery(String index, String query, String timestampField, List<String> tags) {
-        this.index = index;
-        this.query = query;
-        this.timestampField = timestampField != null ? timestampField : "_timestamp";
-        this.tags = tags;
-    }
-
-    /**
-     * StreamInput ctor of Correlation Query
-     * @param sin StreamInput
-     * @throws IOException IOException
-     */
-    public CorrelationQuery(StreamInput sin) throws IOException {
-        this(sin.readString(), sin.readString(), sin.readString(), sin.readStringList());
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeString(index);
-        out.writeString(query);
-        out.writeString(timestampField);
-        out.writeStringCollection(tags);
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject();
-        builder.field(INDEX_FIELD.getPreferredName(), index)
-            .field(QUERY_FIELD.getPreferredName(), query)
-            .field(TIMESTAMP_FIELD.getPreferredName(), timestampField)
-            .field(TAGS_FIELD.getPreferredName(), tags);
-        return builder.endObject();
-    }
-
-    /**
-     * parse into CorrelationQuery
-     * @param xcp XContentParser
-     * @return CorrelationQuery
-     */
-    public static CorrelationQuery parse(XContentParser xcp) {
-        return PARSER.apply(xcp, null);
-    }
-
-    /**
-     * convert StreamInput to CorrelationQuery
-     * @param sin StreamInput
-     * @return CorrelationQuery
-     * @throws IOException IOException
-     */
-    public static CorrelationQuery readFrom(StreamInput sin) throws IOException {
-        return new CorrelationQuery(sin);
-    }
-
-    /**
-     * Set index
-     * @param index event index to correlate
-     */
-    public void setIndex(String index) {
-        this.index = index;
-    }
-
-    /**
-     * Get index
-     * @return event index to correlate
-     */
-    public String getIndex() {
-        return index;
-    }
-
-    /**
-     * Set query
-     * @param query query to filter relevant events for correlations from index
-     */
-    public void setQuery(String query) {
-        this.query = query;
-    }
-
-    /**
-     * Get query
-     * @return query to filter relevant events for correlations from index
-     */
-    public String getQuery() {
-        return query;
-    }
-
-    /**
-     * Set timestamp field
-     * @param timestampField timestamp field in the index
-     */
-    public void setTimestampField(String timestampField) {
-        this.timestampField = timestampField != null ? timestampField : "_timestamp";
-    }
-
-    /**
-     * Get timestamp field
-     * @return timestamp field in the index
-     */
-    public String getTimestampField() {
-        return timestampField;
-    }
-
-    /**
-     * Set tags
-     * @param tags tags to store additional metadata as part of correlation queries.
-     */
-    public void setTags(List<String> tags) {
-        this.tags = tags;
-    }
-
-    /**
-     * Get tags
-     * @return tags to store additional metadata as part of correlation queries.
-     */
-    public List<String> getTags() {
-        return tags;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java
deleted file mode 100644
index 6978d7248e199..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.model;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.core.ParseField;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.core.xcontent.ObjectParser;
-import org.opensearch.core.xcontent.ToXContentObject;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-/**
- * Correlation Rule DSL
- * {
- *   "name": "s3 to app logs",
- *   "correlate": [
- *     {
- *       "index": "s3_access_logs",
- *       "query": "aws.cloudtrail.eventName:ReplicateObject",
- *       "timestampField": "@timestamp",
- *       "tags": [
- *         "s3"
- *       ]
- *     }
- *   ]
- * }
- *
- * @opensearch.api
- * @opensearch.experimental
- */
-public class CorrelationRule implements Writeable, ToXContentObject {
-
-    private static final Logger log = LogManager.getLogger(CorrelationRule.class);
-
-    /**
-     * Correlation Rule Index
-     */
-    public static final String CORRELATION_RULE_INDEX = ".opensearch-correlation-rules-config";
-
-    private static final ParseField ID_FIELD = new ParseField("id");
-    private static final ParseField VERSION_FIELD = new ParseField("version");
-    private static final ParseField NAME_FIELD = new ParseField("name");
-    private static final ParseField CORRELATION_QUERIES_FIELD = new ParseField("correlate");
-    private static final ObjectParser<CorrelationRule, Void> PARSER = new ObjectParser<>("CorrelationRule", CorrelationRule::new);
-
-    static {
-        PARSER.declareString(CorrelationRule::setId, ID_FIELD);
-        PARSER.declareLong(CorrelationRule::setVersion, VERSION_FIELD);
-        PARSER.declareString(CorrelationRule::setName, NAME_FIELD);
-        PARSER.declareField((xcp, rule, context) -> {
-            List<CorrelationQuery> correlationQueries = new ArrayList<>();
-            XContentParser.Token currentToken = xcp.currentToken();
-            if (currentToken == XContentParser.Token.START_ARRAY) {
-                while (xcp.nextToken() != XContentParser.Token.END_ARRAY) {
-                    correlationQueries.add(CorrelationQuery.parse(xcp));
-                }
-            }
-            rule.setCorrelationQueries(correlationQueries);
-        }, CORRELATION_QUERIES_FIELD, ObjectParser.ValueType.OBJECT_ARRAY);
-    }
-
-    private String id;
-
-    private Long version;
-
-    private String name;
-
-    private List<CorrelationQuery> correlationQueries;
-
-    private CorrelationRule() {}
-
-    /**
-     * Parameterized ctor of Correlation Rule
-     * @param name name of rule
-     * @param correlationQueries list of correlation queries part of rule
-     */
-    public CorrelationRule(String name, List<CorrelationQuery> correlationQueries) {
-        this("", 1L, name, correlationQueries);
-    }
-
-    /**
-     * Parameterized ctor of Correlation Rule
-     * @param id id of rule
-     * @param version version of rule
-     * @param name name of rule
-     * @param correlationQueries list of correlation queries part of rule
-     */
-    public CorrelationRule(String id, Long version, String name, List<CorrelationQuery> correlationQueries) {
-        this.id = id;
-        this.version = version;
-        this.name = name;
-        this.correlationQueries = correlationQueries;
-    }
-
-    /**
-     * StreamInput ctor of Correlation Rule
-     * @param sin StreamInput
-     * @throws IOException IOException
-     */
-    public CorrelationRule(StreamInput sin) throws IOException {
-        this(sin.readString(), sin.readLong(), sin.readString(), sin.readList(CorrelationQuery::readFrom));
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject();
-
-        builder.field(ID_FIELD.getPreferredName(), id);
-        builder.field(VERSION_FIELD.getPreferredName(), version);
-        builder.field(NAME_FIELD.getPreferredName(), name);
-
-        CorrelationQuery[] correlationQueries = new CorrelationQuery[] {};
-        correlationQueries = this.correlationQueries.toArray(correlationQueries);
-        builder.field(CORRELATION_QUERIES_FIELD.getPreferredName(), correlationQueries);
-        return builder.endObject();
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeString(id);
-        out.writeLong(version);
-        out.writeString(name);
-
-        for (CorrelationQuery query : correlationQueries) {
-            query.writeTo(out);
-        }
-    }
-
-    /**
-     * parse into CorrelationRule
-     * @param xcp XContentParser
-     * @param id id of rule
-     * @param version version of rule
-     * @return CorrelationRule
-     */
-    public static CorrelationRule parse(XContentParser xcp, String id, Long version) {
-        return PARSER.apply(xcp, null);
-    }
-
-    /**
-     * convert StreamInput to CorrelationRule
-     * @param sin StreamInput
-     * @return CorrelationRule
-     * @throws IOException IOException
-     */
-    public static CorrelationRule readFrom(StreamInput sin) throws IOException {
-        return new CorrelationRule(sin);
-    }
-
-    /**
-     * set id
-     * @param id id of rule
-     */
-    public void setId(String id) {
-        this.id = id;
-    }
-
-    /**
-     * get id
-     * @return id of rule
-     */
-    public String getId() {
-        return id;
-    }
-
-    /**
-     * set version
-     * @param version version of rule
-     */
-    public void setVersion(Long version) {
-        this.version = version;
-    }
-
-    /**
-     * get version
-     * @return version of rule
-     */
-    public Long getVersion() {
-        return version;
-    }
-
-    /**
-     * set name
-     * @param name name of rule
-     */
-    public void setName(String name) {
-        this.name = name;
-    }
-
-    /**
-     * get name
-     * @return name of rule
-     */
-    public String getName() {
-        return name;
-    }
-
-    /**
-     * set correlation queries
-     * @param correlationQueries set correlation queries for the rule
-     */
-    public void setCorrelationQueries(List<CorrelationQuery> correlationQueries) {
-        this.correlationQueries = correlationQueries;
-    }
-
-    /**
-     * get correlation queries
-     * @return correlation queries for the rule
-     */
-    public List<CorrelationQuery> getCorrelationQueries() {
-        return correlationQueries;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-        CorrelationRule that = (CorrelationRule) o;
-        return id.equals(that.id)
-            && version.equals(that.version)
-            && name.equals(that.name)
-            && correlationQueries.equals(that.correlationQueries);
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(id, version, name, correlationQueries);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java
deleted file mode 100644
index b04b7be3c62e3..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * data models for correlation rules
- */
-package org.opensearch.plugin.correlation.rules.model;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java
deleted file mode 100644
index 3b2b7eb02ae5f..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.resthandler;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.client.node.NodeClient;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.core.xcontent.ToXContent;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.plugin.correlation.EventsCorrelationPlugin;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse;
-import org.opensearch.plugin.correlation.rules.model.CorrelationRule;
-import org.opensearch.rest.BaseRestHandler;
-import org.opensearch.rest.BytesRestResponse;
-import org.opensearch.rest.RestChannel;
-import org.opensearch.rest.RestRequest;
-import org.opensearch.rest.RestResponse;
-import org.opensearch.rest.action.RestResponseListener;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Locale;
-
-/**
- * Rest action for indexing correlation rules.
- *
- * @opensearch.api
- */
-public class RestIndexCorrelationRuleAction extends BaseRestHandler {
-
-    private static final Logger log = LogManager.getLogger(RestIndexCorrelationRuleAction.class);
-
-    /**
-     * Default constructor
-     */
-    public RestIndexCorrelationRuleAction() {}
-
-    @Override
-    public String getName() {
-        return "index_correlation_rule_action";
-    }
-
-    @Override
-    public List<Route> routes() {
-        return List.of(
-            new Route(RestRequest.Method.POST, EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI),
-            new Route(
-                RestRequest.Method.PUT,
-                String.format(Locale.ROOT, "%s/{%s}", EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI, "rule_id")
-            )
-        );
-    }
-
-    @Override
-    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
-        log.debug(String.format(Locale.ROOT, "%s %s", request.method(), EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI));
-
-        String id = request.param("rule_id", "");
-
-        XContentParser xcp = request.contentParser();
-
-        CorrelationRule correlationRule = CorrelationRule.parse(xcp, id, 1L);
-        IndexCorrelationRuleRequest indexCorrelationRuleRequest = new IndexCorrelationRuleRequest(id, correlationRule, request.method());
-        return channel -> client.execute(
-            IndexCorrelationRuleAction.INSTANCE,
-            indexCorrelationRuleRequest,
-            indexCorrelationRuleResponse(channel, request.method())
-        );
-    }
-
-    private RestResponseListener<IndexCorrelationRuleResponse> indexCorrelationRuleResponse(
-        RestChannel channel,
-        RestRequest.Method restMethod
-    ) {
-        return new RestResponseListener<>(channel) {
-            @Override
-            public RestResponse buildResponse(IndexCorrelationRuleResponse response) throws Exception {
-                RestStatus returnStatus = RestStatus.CREATED;
-                if (restMethod == RestRequest.Method.PUT) {
-                    returnStatus = RestStatus.OK;
-                }
-
-                BytesRestResponse restResponse = new BytesRestResponse(
-                    returnStatus,
-                    response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)
-                );
-
-                if (restMethod == RestRequest.Method.POST) {
-                    String location = String.format(
-                        Locale.ROOT,
-                        "%s/%s",
-                        EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI,
-                        response.getId()
-                    );
-                    restResponse.addHeader("Location", location);
-                }
-
-                return restResponse;
-            }
-        };
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java
deleted file mode 100644
index 607ec355801ad..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * Rest Handlers for correlation rules
- */
-package org.opensearch.plugin.correlation.rules.resthandler;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java
deleted file mode 100644
index 7b4fb670c4aee..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.rules.transport;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.OpenSearchStatusException;
-import org.opensearch.action.admin.indices.create.CreateIndexResponse;
-import org.opensearch.action.index.IndexRequest;
-import org.opensearch.action.index.IndexResponse;
-import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.HandledTransportAction;
-import org.opensearch.action.support.WriteRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
-import org.opensearch.client.Client;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.inject.Inject;
-import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.core.action.ActionListener;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.core.xcontent.ToXContent;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest;
-import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse;
-import org.opensearch.plugin.correlation.rules.model.CorrelationRule;
-import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices;
-import org.opensearch.plugin.correlation.utils.IndexUtils;
-import org.opensearch.rest.RestRequest;
-import org.opensearch.tasks.Task;
-import org.opensearch.transport.TransportService;
-
-import java.io.IOException;
-import java.util.Locale;
-
-/**
- * Transport Action for indexing correlation rules.
- *
- * @opensearch.internal
- */
-public class TransportIndexCorrelationRuleAction extends HandledTransportAction<IndexCorrelationRuleRequest, IndexCorrelationRuleResponse> {
-
-    private static final Logger log = LogManager.getLogger(TransportIndexCorrelationRuleAction.class);
-
-    private final Client client;
-
-    private final CorrelationRuleIndices correlationRuleIndices;
-
-    private final ClusterService clusterService;
-
-    /**
-     * Parameterized ctor for Transport Action
-     * @param transportService TransportService
-     * @param client OS client
-     * @param actionFilters ActionFilters
-     * @param clusterService ClusterService
-     * @param correlationRuleIndices CorrelationRuleIndices which manages lifecycle of correlation rule index
-     */
-    @Inject
-    public TransportIndexCorrelationRuleAction(
-        TransportService transportService,
-        Client client,
-        ActionFilters actionFilters,
-        ClusterService clusterService,
-        CorrelationRuleIndices correlationRuleIndices
-    ) {
-        super(IndexCorrelationRuleAction.NAME, transportService, actionFilters, IndexCorrelationRuleRequest::new);
-        this.client = client;
-        this.clusterService = clusterService;
-        this.correlationRuleIndices = correlationRuleIndices;
-    }
-
-    @Override
-    protected void doExecute(Task task, IndexCorrelationRuleRequest request, ActionListener<IndexCorrelationRuleResponse> listener) {
-        AsyncIndexCorrelationRuleAction asyncAction = new AsyncIndexCorrelationRuleAction(request, listener);
-        asyncAction.start();
-    }
-
-    private class AsyncIndexCorrelationRuleAction {
-        private final IndexCorrelationRuleRequest request;
-
-        private final ActionListener<IndexCorrelationRuleResponse> listener;
-
-        AsyncIndexCorrelationRuleAction(IndexCorrelationRuleRequest request, ActionListener<IndexCorrelationRuleResponse> listener) {
-            this.request = request;
-            this.listener = listener;
-        }
-
-        void start() {
-            try {
-                if (correlationRuleIndices.correlationRuleIndexExists() == false) {
-                    try {
-                        correlationRuleIndices.initCorrelationRuleIndex(new ActionListener<>() {
-                            @Override
-                            public void onResponse(CreateIndexResponse response) {
-                                try {
-                                    onCreateMappingsResponse(response);
-                                    indexCorrelationRule();
-                                } catch (IOException e) {
-                                    onFailures(e);
-                                }
-                            }
-
-                            @Override
-                            public void onFailure(Exception e) {
-                                onFailures(e);
-                            }
-                        });
-                    } catch (IOException e) {
-                        onFailures(e);
-                    }
-                } else if (!IndexUtils.correlationRuleIndexUpdated) {
-                    IndexUtils.updateIndexMapping(
-                        CorrelationRule.CORRELATION_RULE_INDEX,
-                        CorrelationRuleIndices.correlationRuleIndexMappings(),
-                        clusterService.state(),
-                        client.admin().indices(),
-                        new ActionListener<>() {
-                            @Override
-                            public void onResponse(AcknowledgedResponse response) {
-                                onUpdateMappingsResponse(response);
-                                try {
-                                    indexCorrelationRule();
-                                } catch (IOException e) {
-                                    onFailures(e);
-                                }
-                            }
-
-                            @Override
-                            public void onFailure(Exception e) {
-                                onFailures(e);
-                            }
-                        }
-                    );
-                } else {
-                    indexCorrelationRule();
-                }
-            } catch (IOException ex) {
-                onFailures(ex);
-            }
-        }
-
-        void indexCorrelationRule() throws IOException {
-            IndexRequest indexRequest;
-            if (request.getMethod() == RestRequest.Method.POST) {
-                indexRequest = new IndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).setRefreshPolicy(
-                    WriteRequest.RefreshPolicy.IMMEDIATE
-                )
-                    .source(request.getCorrelationRule().toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS))
-                    .timeout(TimeValue.timeValueSeconds(60));
-            } else {
-                indexRequest = new IndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).setRefreshPolicy(
-                    WriteRequest.RefreshPolicy.IMMEDIATE
-                )
-                    .source(request.getCorrelationRule().toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS))
-                    .id(request.getCorrelationRuleId())
-                    .timeout(TimeValue.timeValueSeconds(60));
-            }
-
-            client.index(indexRequest, new ActionListener<>() {
-                @Override
-                public void onResponse(IndexResponse response) {
-                    if (response.status().equals(RestStatus.CREATED) || response.status().equals(RestStatus.OK)) {
-                        CorrelationRule ruleResponse = request.getCorrelationRule();
-                        ruleResponse.setId(response.getId());
-                        onOperation(ruleResponse);
-                    } else {
-                        onFailures(new OpenSearchStatusException(response.toString(), RestStatus.INTERNAL_SERVER_ERROR));
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception e) {
-                    onFailures(e);
-                }
-            });
-        }
-
-        private void onCreateMappingsResponse(CreateIndexResponse response) throws IOException {
-            if (response.isAcknowledged()) {
-                log.info(String.format(Locale.ROOT, "Created %s with mappings.", CorrelationRule.CORRELATION_RULE_INDEX));
-                IndexUtils.correlationRuleIndexUpdated();
-            } else {
-                log.error(String.format(Locale.ROOT, "Create %s mappings call not acknowledged.", CorrelationRule.CORRELATION_RULE_INDEX));
-                throw new OpenSearchStatusException(
-                    String.format(Locale.getDefault(), "Create %s mappings call not acknowledged", CorrelationRule.CORRELATION_RULE_INDEX),
-                    RestStatus.INTERNAL_SERVER_ERROR
-                );
-            }
-        }
-
-        private void onUpdateMappingsResponse(AcknowledgedResponse response) {
-            if (response.isAcknowledged()) {
-                log.info(String.format(Locale.ROOT, "Created %s with mappings.", CorrelationRule.CORRELATION_RULE_INDEX));
-                IndexUtils.correlationRuleIndexUpdated();
-            } else {
-                log.error(String.format(Locale.ROOT, "Create %s mappings call not acknowledged.", CorrelationRule.CORRELATION_RULE_INDEX));
-                throw new OpenSearchStatusException(
-                    String.format(Locale.getDefault(), "Create %s mappings call not acknowledged", CorrelationRule.CORRELATION_RULE_INDEX),
-                    RestStatus.INTERNAL_SERVER_ERROR
-                );
-            }
-        }
-
-        private void onOperation(CorrelationRule correlationRule) {
-            finishHim(correlationRule, null);
-        }
-
-        private void onFailures(Exception t) {
-            finishHim(null, t);
-        }
-
-        private void finishHim(CorrelationRule correlationRule, Exception t) {
-            if (t != null) {
-                listener.onFailure(t);
-            } else {
-                listener.onResponse(
-                    new IndexCorrelationRuleResponse(
-                        correlationRule.getId(),
-                        correlationRule.getVersion(),
-                        request.getMethod() == RestRequest.Method.POST ? RestStatus.CREATED : RestStatus.OK,
-                        correlationRule
-                    )
-                );
-            }
-        }
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java
deleted file mode 100644
index 7a47efbb9bb45..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * Transport Actions for correlation rules.
- */
-package org.opensearch.plugin.correlation.rules.transport;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java
deleted file mode 100644
index 2e2dbbffbeaa2..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.settings;
-
-import org.opensearch.common.settings.Setting;
-import org.opensearch.common.unit.TimeValue;
-
-import java.util.concurrent.TimeUnit;
-
-import static org.opensearch.common.settings.Setting.Property.IndexScope;
-
-/**
- * Settings for events-correlation-engine.
- *
- * @opensearch.api
- * @opensearch.experimental
- */
-public class EventsCorrelationSettings {
-    /**
-     * Correlation Index setting name
-     */
-    public static final String CORRELATION_INDEX = "index.correlation";
-    /**
-     * Boolean setting to check if an OS index is a correlation index.
-     */
-    public static final Setting<Boolean> IS_CORRELATION_INDEX_SETTING = Setting.boolSetting(CORRELATION_INDEX, false, IndexScope);
-    /**
-     * Global time window setting for Correlations
-     */
-    public static final Setting<TimeValue> CORRELATION_TIME_WINDOW = Setting.positiveTimeSetting(
-        "plugins.security_analytics.correlation_time_window",
-        new TimeValue(5, TimeUnit.MINUTES),
-        Setting.Property.NodeScope,
-        Setting.Property.Dynamic
-    );
-
-    /**
-     * Default constructor
-     */
-    public EventsCorrelationSettings() {}
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java
deleted file mode 100644
index 795291cd0de2e..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * Settings for events-correlation-engine
- */
-package org.opensearch.plugin.correlation.settings;
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java
deleted file mode 100644
index 3656bd413733a..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.utils;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.action.admin.indices.create.CreateIndexRequest;
-import org.opensearch.action.admin.indices.create.CreateIndexResponse;
-import org.opensearch.client.Client;
-import org.opensearch.cluster.ClusterState;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.core.action.ActionListener;
-import org.opensearch.plugin.correlation.rules.model.CorrelationRule;
-
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Objects;
-
-/**
- * Correlation Rule Index manager
- *
- * @opensearch.internal
- */
-public class CorrelationRuleIndices {
-    private static final Logger log = LogManager.getLogger(CorrelationRuleIndices.class);
-
-    private final Client client;
-
-    private final ClusterService clusterService;
-
-    /**
-     * Parameterized ctor for CorrelationRuleIndices
-     * @param client OS Client
-     * @param clusterService ClusterService
-     */
-    public CorrelationRuleIndices(Client client, ClusterService clusterService) {
-        this.client = client;
-        this.clusterService = clusterService;
-    }
-
-    /**
-     * get correlation rule index mappings
-     * @return mappings of correlation rule index
-     * @throws IOException IOException
-     */
-    public static String correlationRuleIndexMappings() throws IOException {
-        return new String(
-            Objects.requireNonNull(CorrelationRuleIndices.class.getClassLoader().getResourceAsStream("mappings/correlation-rules.json"))
-                .readAllBytes(),
-            Charset.defaultCharset()
-        );
-    }
-
-    /**
-     * init the correlation rule index
-     * @param actionListener listener
-     * @throws IOException IOException
-     */
-    public void initCorrelationRuleIndex(ActionListener<CreateIndexResponse> actionListener) throws IOException {
-        if (correlationRuleIndexExists() == false) {
-            CreateIndexRequest indexRequest = new CreateIndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).mapping(
-                correlationRuleIndexMappings()
-            ).settings(Settings.builder().put("index.hidden", true).build());
-            client.admin().indices().create(indexRequest, actionListener);
-        }
-    }
-
-    /**
-     * check if correlation rule index exists
-     * @return boolean
-     */
-    public boolean correlationRuleIndexExists() {
-        ClusterState clusterState = clusterService.state();
-        return clusterState.getRoutingTable().hasIndex(CorrelationRule.CORRELATION_RULE_INDEX);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java
deleted file mode 100644
index 362be3d2932e3..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.utils;
-
-import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
-import org.opensearch.client.IndicesAdminClient;
-import org.opensearch.cluster.ClusterState;
-import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.xcontent.LoggingDeprecationHandler;
-import org.opensearch.core.action.ActionListener;
-import org.opensearch.core.xcontent.MediaTypeRegistry;
-import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.core.xcontent.XContentParser;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Objects;
-
-import static org.opensearch.core.ParseField.CommonFields._META;
-
-/**
- * Index Management utils
- *
- * @opensearch.internal
- */
-public class IndexUtils {
-    private static final Integer NO_SCHEMA_VERSION = 0;
-    private static final String SCHEMA_VERSION = "schema_version";
-
-    /**
-     * manages the mappings lifecycle for correlation rule index
-     */
-    public static Boolean correlationRuleIndexUpdated = false;
-
-    private IndexUtils() {}
-
-    /**
-     * updates the status of correlationRuleIndexUpdated to true
-     */
-    public static void correlationRuleIndexUpdated() {
-        correlationRuleIndexUpdated = true;
-    }
-
-    /**
-     * util method which decides based on schema version whether to update an index.
-     * @param index IndexMetadata
-     * @param mapping new mappings
-     * @return Boolean
-     * @throws IOException IOException
-     */
-    public static Boolean shouldUpdateIndex(IndexMetadata index, String mapping) throws IOException {
-        Integer oldVersion = NO_SCHEMA_VERSION;
-        Integer newVersion = getSchemaVersion(mapping);
-
-        Map<String, Object> indexMapping = index.mapping().sourceAsMap();
-        if (indexMapping != null
-            && indexMapping.containsKey(_META.getPreferredName())
-            && indexMapping.get(_META.getPreferredName()) instanceof HashMap<?, ?>) {
-            Map<?, ?> metaData = (HashMap<?, ?>) indexMapping.get(_META.getPreferredName());
-            if (metaData.containsKey(SCHEMA_VERSION)) {
-                oldVersion = (Integer) metaData.get(SCHEMA_VERSION);
-            }
-        }
-        return newVersion > oldVersion;
-    }
-
-    /**
-     * Gets the schema version for the mapping
-     * @param mapping mappings as input
-     * @return schema version
-     * @throws IOException IOException
-     */
-    public static Integer getSchemaVersion(String mapping) throws IOException {
-        XContentParser xcp = MediaTypeRegistry.JSON.xContent()
-            .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, mapping);
-
-        while (!xcp.isClosed()) {
-            XContentParser.Token token = xcp.currentToken();
-            if (token != null && token != XContentParser.Token.END_OBJECT && token != XContentParser.Token.START_OBJECT) {
-                if (!Objects.equals(xcp.currentName(), _META.getPreferredName())) {
-                    xcp.nextToken();
-                    xcp.skipChildren();
-                } else {
-                    while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
-                        switch (xcp.currentName()) {
-                            case SCHEMA_VERSION:
-                                int version = xcp.intValue();
-                                if (version < 0) {
-                                    throw new IllegalArgumentException(
-                                        String.format(Locale.getDefault(), "%s cannot be negative", SCHEMA_VERSION)
-                                    );
-                                }
-                                return version;
-                            default:
-                                xcp.nextToken();
-                        }
-                    }
-                }
-            }
-            xcp.nextToken();
-        }
-        return NO_SCHEMA_VERSION;
-    }
-
-    /**
-     * updates the mappings for the index.
-     * @param index index for which mapping needs to be updated
-     * @param mapping new mappings
-     * @param clusterState ClusterState
-     * @param client Admin client
-     * @param actionListener listener
-     * @throws IOException IOException
-     */
-    public static void updateIndexMapping(
-        String index,
-        String mapping,
-        ClusterState clusterState,
-        IndicesAdminClient client,
-        ActionListener<AcknowledgedResponse> actionListener
-    ) throws IOException {
-        if (clusterState.metadata().indices().containsKey(index)) {
-            if (shouldUpdateIndex(clusterState.metadata().index(index), mapping)) {
-                PutMappingRequest putMappingRequest = new PutMappingRequest(index).source(mapping, MediaTypeRegistry.JSON);
-                client.putMapping(putMappingRequest, actionListener);
-            } else {
-                actionListener.onResponse(new AcknowledgedResponse(true));
-            }
-        }
-    }
-}
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java
deleted file mode 100644
index 798196c47df20..0000000000000
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * utils package for events-correlation-engine
- */
-package org.opensearch.plugin.correlation.utils;
diff --git a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
deleted file mode 100644
index 013c17e4a9736..0000000000000
--- a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
+++ /dev/null
@@ -1 +0,0 @@
-org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec
diff --git a/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json b/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json
deleted file mode 100644
index 7741b160eca24..0000000000000
--- a/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json
+++ /dev/null
@@ -1,60 +0,0 @@
-{
-  "_meta" : {
-    "schema_version": 1
-  },
-  "properties": {
-    "name": {
-      "type": "text",
-      "analyzer" : "whitespace",
-      "fields": {
-        "keyword": {
-          "type": "keyword",
-          "ignore_above": 256
-        }
-      }
-    },
-    "correlate": {
-      "type": "nested",
-      "properties": {
-        "index": {
-          "type": "text",
-          "analyzer" : "whitespace",
-          "fields": {
-            "keyword": {
-              "type": "keyword",
-              "ignore_above": 256
-            }
-          }
-        },
-        "query": {
-          "type": "text",
-          "analyzer" : "whitespace",
-          "fields": {
-            "keyword": {
-              "type": "keyword",
-              "ignore_above": 256
-            }
-          }
-        },
-        "tags": {
-          "type": "text",
-          "fields" : {
-            "keyword" : {
-              "type" : "keyword"
-            }
-          }
-        },
-        "timestampField": {
-          "type": "text",
-          "analyzer" : "whitespace",
-          "fields": {
-            "keyword": {
-              "type": "keyword",
-              "ignore_above": 256
-            }
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java
deleted file mode 100644
index 005ffa2097b03..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation;
-
-import org.opensearch.test.OpenSearchTestCase;
-import org.junit.Assert;
-
-public class EventsCorrelationPluginTests extends OpenSearchTestCase {
-
-    public void testDummy() {
-        Assert.assertEquals(1, 1);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java
deleted file mode 100644
index 19ce3b33514d8..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index;
-
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentHelper;
-import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.core.xcontent.ToXContent;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.index.mapper.MapperParsingException;
-import org.opensearch.test.OpenSearchTestCase;
-import org.junit.Assert;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.PARAMETERS;
-import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION;
-
-/**
- * Unit tests for CorrelationsParamsContext
- */
-public class CorrelationParamsContextTests extends OpenSearchTestCase {
-
-    /**
-     * Test reading from and writing to streams
-     */
-    public void testStreams() throws IOException {
-        int efConstruction = 321;
-        int m = 12;
-
-        Map<String, Object> parameters = new HashMap<>();
-        parameters.put("m", m);
-        parameters.put("ef_construction", efConstruction);
-
-        CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters);
-
-        BytesStreamOutput streamOutput = new BytesStreamOutput();
-        context.writeTo(streamOutput);
-
-        CorrelationParamsContext copy = new CorrelationParamsContext(streamOutput.bytes().streamInput());
-        Assert.assertEquals(context.getSimilarityFunction(), copy.getSimilarityFunction());
-        Assert.assertEquals(context.getParameters(), copy.getParameters());
-    }
-
-    /**
-     * test get vector similarity function
-     */
-    public void testVectorSimilarityFunction() {
-        int efConstruction = 321;
-        int m = 12;
-
-        Map<String, Object> parameters = new HashMap<>();
-        parameters.put("m", m);
-        parameters.put("ef_construction", efConstruction);
-
-        CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters);
-        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction());
-    }
-
-    /**
-     * test get parameters
-     */
-    public void testParameters() {
-        int efConstruction = 321;
-        int m = 12;
-
-        Map<String, Object> parameters = new HashMap<>();
-        parameters.put("m", m);
-        parameters.put("ef_construction", efConstruction);
-
-        CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters);
-        Assert.assertEquals(parameters, context.getParameters());
-    }
-
-    /**
-     * test parse method with invalid input
-     * @throws IOException IOException
-     */
-    public void testParse_Invalid() throws IOException {
-        // Invalid input type
-        Integer invalidIn = 12;
-        expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(invalidIn));
-
-        // Invalid vector similarity function
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION, 0)
-            .endObject();
-
-        final Map<String, Object> in2 = xContentBuilderToMap(xContentBuilder);
-        expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in2));
-
-        // Invalid parameters
-        xContentBuilder = XContentFactory.jsonBuilder().startObject().field(PARAMETERS, 0).endObject();
-
-        final Map<String, Object> in4 = xContentBuilderToMap(xContentBuilder);
-        expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in4));
-    }
-
-    /**
-     * test parse with null parameters
-     * @throws IOException IOException
-     */
-    public void testParse_NullParameters() throws IOException {
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN)
-            .field(PARAMETERS, (String) null)
-            .endObject();
-        Map<String, Object> in = xContentBuilderToMap(xContentBuilder);
-        Assert.assertThrows(MapperParsingException.class, () -> { CorrelationParamsContext.parse(in); });
-    }
-
-    /**
-     * test parse method
-     * @throws IOException IOException
-     */
-    public void testParse_Valid() throws IOException {
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN)
-            .startObject(PARAMETERS)
-            .field("m", 2)
-            .field("ef_construction", 128)
-            .endObject()
-            .endObject();
-
-        Map<String, Object> in = xContentBuilderToMap(xContentBuilder);
-        CorrelationParamsContext context = CorrelationParamsContext.parse(in);
-        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction());
-        Assert.assertEquals(Map.of("m", 2, "ef_construction", 128), context.getParameters());
-    }
-
-    /**
-     * test toXContent method
-     * @throws IOException IOException
-     */
-    public void testToXContent() throws IOException {
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN)
-            .startObject(PARAMETERS)
-            .field("m", 2)
-            .field("ef_construction", 128)
-            .endObject()
-            .endObject();
-
-        Map<String, Object> in = xContentBuilderToMap(xContentBuilder);
-        CorrelationParamsContext context = CorrelationParamsContext.parse(in);
-        XContentBuilder builder = XContentFactory.jsonBuilder();
-        builder = context.toXContent(builder, ToXContent.EMPTY_PARAMS);
-
-        Map<String, Object> out = xContentBuilderToMap(builder);
-        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN.name(), out.get(VECTOR_SIMILARITY_FUNCTION));
-    }
-
-    private Map<String, Object> xContentBuilderToMap(XContentBuilder xContentBuilder) {
-        return XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2();
-    }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java
deleted file mode 100644
index 32c71dcd37196..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index;
-
-import org.apache.lucene.document.FieldType;
-import org.opensearch.ExceptionsHelper;
-import org.opensearch.OpenSearchException;
-import org.opensearch.common.Randomness;
-import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.BytesStreamInput;
-import org.opensearch.test.OpenSearchTestCase;
-import org.junit.Assert;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.Random;
-
-/**
- * Unit tests for VectorField
- */
-public class VectorFieldTests extends OpenSearchTestCase {
-
-    private final Random random = Randomness.get();
-
-    /**
-     * test VectorField ctor
-     */
-    public void testVectorField_ctor() {
-        VectorField field = new VectorField("test-field", new float[] { 1.0f, 1.0f }, new FieldType());
-        Assert.assertEquals("test-field", field.name());
-    }
-
-    /**
-     * test float vector to array serializer
-     * @throws IOException IOException
-     */
-    public void testVectorAsArraySerializer() throws IOException {
-        final float[] vector = getArrayOfRandomFloats(20);
-
-        final BytesStreamOutput objectStream = new BytesStreamOutput();
-        objectStream.writeFloatArray(vector);
-        final byte[] serializedVector = objectStream.bytes().toBytesRef().bytes;
-
-        final byte[] actualSerializedVector = VectorField.floatToByteArray(vector);
-
-        Assert.assertNotNull(actualSerializedVector);
-        Assert.assertArrayEquals(serializedVector, actualSerializedVector);
-
-        final float[] actualDeserializedVector = byteToFloatArray(actualSerializedVector);
-        Assert.assertNotNull(actualDeserializedVector);
-        Assert.assertArrayEquals(vector, actualDeserializedVector, 0.1f);
-    }
-
-    /**
-     * test byte array to float vector failures
-     */
-    public void testByteToFloatArrayFailures() {
-        final byte[] serializedVector = "test-dummy".getBytes(StandardCharsets.UTF_8);
-        expectThrows(OpenSearchException.class, () -> { byteToFloatArray(serializedVector); });
-    }
-
-    private float[] getArrayOfRandomFloats(int length) {
-        float[] vector = new float[length];
-        for (int i = 0; i < 20; ++i) {
-            vector[i] = random.nextFloat();
-        }
-        return vector;
-    }
-
-    private static float[] byteToFloatArray(byte[] byteStream) {
-        try (BytesStreamInput objectStream = new BytesStreamInput(byteStream)) {
-            return objectStream.readFloatArray();
-        } catch (IOException ex) {
-            throw ExceptionsHelper.convertToOpenSearchException(ex);
-        }
-    }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java
deleted file mode 100644
index 7223b450a136c..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.codec.correlation990;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.KnnFloatVectorField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.SerialMergeScheduler;
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.tests.index.RandomIndexWriter;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext;
-import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper;
-import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryFactory;
-import org.opensearch.test.OpenSearchTestCase;
-
-import java.util.Map;
-import java.util.Optional;
-import java.util.function.Function;
-
-import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION;
-import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M;
-import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_9_0;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * Unit tests for custom correlation codec
- */
-public class CorrelationCodecTests extends OpenSearchTestCase {
-
-    private static final String FIELD_NAME_ONE = "test_vector_one";
-    private static final String FIELD_NAME_TWO = "test_vector_two";
-
-    /**
-     * test correlation vector index
-     * @throws Exception Exception
-     */
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8329")
-    public void testCorrelationVectorIndex() throws Exception {
-        Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider =
-            mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService));
-        Function<PerFieldCorrelationVectorsFormat, Codec> correlationCodecProvider = (correlationVectorsFormat -> new CorrelationCodec(
-            V_9_9_0.getDefaultCodecDelegate(),
-            correlationVectorsFormat
-        ));
-        testCorrelationVectorIndex(correlationCodecProvider, perFieldCorrelationVectorsProvider);
-    }
-
-    private void testCorrelationVectorIndex(
-        final Function<PerFieldCorrelationVectorsFormat, Codec> codecProvider,
-        final Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider
-    ) throws Exception {
-        final MapperService mapperService = mock(MapperService.class);
-        final CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext(
-            VectorSimilarityFunction.EUCLIDEAN,
-            Map.of(METHOD_PARAMETER_M, 16, METHOD_PARAMETER_EF_CONSTRUCTION, 256)
-        );
-
-        final VectorFieldMapper.CorrelationVectorFieldType mappedFieldType1 = new VectorFieldMapper.CorrelationVectorFieldType(
-            FIELD_NAME_ONE,
-            Map.of(),
-            3,
-            correlationParamsContext
-        );
-        final VectorFieldMapper.CorrelationVectorFieldType mappedFieldType2 = new VectorFieldMapper.CorrelationVectorFieldType(
-            FIELD_NAME_TWO,
-            Map.of(),
-            2,
-            correlationParamsContext
-        );
-        when(mapperService.fieldType(eq(FIELD_NAME_ONE))).thenReturn(mappedFieldType1);
-        when(mapperService.fieldType(eq(FIELD_NAME_TWO))).thenReturn(mappedFieldType2);
-
-        var perFieldCorrelationVectorsFormatSpy = spy(perFieldCorrelationVectorsProvider.apply(mapperService));
-        final Codec codec = codecProvider.apply(perFieldCorrelationVectorsFormatSpy);
-
-        Directory dir = newFSDirectory(createTempDir());
-        IndexWriterConfig iwc = newIndexWriterConfig();
-        iwc.setMergeScheduler(new SerialMergeScheduler());
-        iwc.setCodec(codec);
-
-        final FieldType luceneFieldType = KnnFloatVectorField.createFieldType(3, VectorSimilarityFunction.EUCLIDEAN);
-        float[] array = { 1.0f, 3.0f, 4.0f };
-        KnnFloatVectorField vectorField = new KnnFloatVectorField(FIELD_NAME_ONE, array, luceneFieldType);
-        RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
-        Document doc = new Document();
-        doc.add(vectorField);
-        writer.addDocument(doc);
-        writer.commit();
-        IndexReader reader = writer.getReader();
-        writer.close();
-
-        verify(perFieldCorrelationVectorsFormatSpy).getKnnVectorsFormatForField(eq(FIELD_NAME_ONE));
-
-        IndexSearcher searcher = new IndexSearcher(reader);
-        Query query = CorrelationQueryFactory.create(
-            new CorrelationQueryFactory.CreateQueryRequest("dummy", FIELD_NAME_ONE, new float[] { 1.0f, 0.0f, 0.0f }, 1, null, null)
-        );
-
-        assertEquals(1, searcher.count(query));
-
-        reader.close();
-        dir.close();
-    }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java
deleted file mode 100644
index 674f35069a742..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.mapper;
-
-import org.apache.lucene.document.KnnFloatVectorField;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.apache.lucene.search.FieldExistsQuery;
-import org.opensearch.Version;
-import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.Explicit;
-import org.opensearch.common.settings.IndexScopedSettings;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentHelper;
-import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.index.IndexSettings;
-import org.opensearch.index.mapper.ContentPath;
-import org.opensearch.index.mapper.FieldMapper;
-import org.opensearch.index.mapper.Mapper;
-import org.opensearch.index.mapper.MapperParsingException;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.index.mapper.ParseContext;
-import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.index.query.QueryShardException;
-import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext;
-import org.opensearch.search.lookup.SearchLookup;
-import org.opensearch.test.OpenSearchTestCase;
-import org.junit.Assert;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-import org.mockito.Mockito;
-
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * Unit tests for correlation vector field mapper
- */
-public class CorrelationVectorFieldMapperTests extends OpenSearchTestCase {
-
-    private static final String CORRELATION_VECTOR_TYPE = "correlation_vector";
-    private static final String DIMENSION_FIELD_NAME = "dimension";
-    private static final String TYPE_FIELD_NAME = "type";
-
-    /**
-     * test builder construction from parse of correlation params context
-     * @throws IOException IOException
-     */
-    public void testBuilder_parse_fromCorrelationParamsContext() throws IOException {
-        String fieldName = "test-field-name";
-        String indexName = "test-index-name";
-        Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build();
-
-        VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser();
-
-        int efConstruction = 321;
-        int m = 12;
-        int dimension = 10;
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE)
-            .field(DIMENSION_FIELD_NAME, dimension)
-            .startObject("correlation_ctx")
-            .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name())
-            .startObject("parameters")
-            .field("m", m)
-            .field("ef_construction", efConstruction)
-            .endObject()
-            .endObject()
-            .endObject();
-
-        VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse(
-            fieldName,
-            XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(),
-            buildParserContext(indexName, settings)
-        );
-        Mapper.BuilderContext builderContext = new Mapper.BuilderContext(settings, new ContentPath());
-        builder.build(builderContext);
-
-        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, builder.correlationParamsContext.getValue().getSimilarityFunction());
-        Assert.assertEquals(321, builder.correlationParamsContext.getValue().getParameters().get("ef_construction"));
-
-        XContentBuilder xContentBuilderEmptyParams = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE)
-            .field(DIMENSION_FIELD_NAME, dimension)
-            .startObject("correlation_ctx")
-            .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name())
-            .endObject()
-            .endObject();
-
-        VectorFieldMapper.Builder builderEmptyParams = (VectorFieldMapper.Builder) typeParser.parse(
-            fieldName,
-            XContentHelper.convertToMap(BytesReference.bytes(xContentBuilderEmptyParams), true, xContentBuilderEmptyParams.contentType())
-                .v2(),
-            buildParserContext(indexName, settings)
-        );
-
-        Assert.assertEquals(
-            VectorSimilarityFunction.EUCLIDEAN,
-            builderEmptyParams.correlationParamsContext.getValue().getSimilarityFunction()
-        );
-        Assert.assertTrue(builderEmptyParams.correlationParamsContext.getValue().getParameters().isEmpty());
-    }
-
-    /**
-     * test type parser construction throw error for invalid dimension of correlation vectors
-     * @throws IOException IOException
-     */
-    public void testTypeParser_parse_fromCorrelationParamsContext_InvalidDimension() throws IOException {
-        String fieldName = "test-field-name";
-        String indexName = "test-index-name";
-        Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build();
-
-        VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser();
-
-        int efConstruction = 321;
-        int m = 12;
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE)
-            .field(DIMENSION_FIELD_NAME, 2000)
-            .startObject("correlation_ctx")
-            .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name())
-            .startObject("parameters")
-            .field("m", m)
-            .field("ef_construction", efConstruction)
-            .endObject()
-            .endObject()
-            .endObject();
-
-        VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse(
-            fieldName,
-            XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(),
-            buildParserContext(indexName, settings)
-        );
-
-        expectThrows(IllegalArgumentException.class, () -> builder.build(new Mapper.BuilderContext(settings, new ContentPath())));
-    }
-
-    /**
-     * test type parser construction error for invalid vector similarity function
-     * @throws IOException IOException
-     */
-    public void testTypeParser_parse_fromCorrelationParamsContext_InvalidVectorSimilarityFunction() throws IOException {
-        String fieldName = "test-field-name";
-        String indexName = "test-index-name";
-        Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build();
-
-        VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser();
-
-        int efConstruction = 321;
-        int m = 12;
-        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
-            .startObject()
-            .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE)
-            .field(DIMENSION_FIELD_NAME, 2000)
-            .startObject("correlation_ctx")
-            .field("similarityFunction", "invalid")
-            .startObject("parameters")
-            .field("m", m)
-            .field("ef_construction", efConstruction)
-            .endObject()
-            .endObject()
-            .endObject();
-
-        expectThrows(
-            MapperParsingException.class,
-            () -> typeParser.parse(
-                fieldName,
-                XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(),
-                buildParserContext(indexName, settings)
-            )
-        );
-    }
-
-    /**
-     * test parseCreateField in CorrelationVectorFieldMapper
-     * @throws IOException ioexception
-     */
-    public void testCorrelationVectorFieldMapper_parseCreateField() throws IOException {
-        String fieldName = "test-field-name";
-        int dimension = 10;
-        float[] testVector = createInitializedFloatArray(dimension, 1.0f);
-        CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, Map.of());
-
-        VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType(
-            fieldName,
-            Map.of(),
-            dimension,
-            correlationParamsContext
-        );
-
-        CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput input = new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput(
-            fieldName,
-            correlationVectorFieldType,
-            FieldMapper.MultiFields.empty(),
-            FieldMapper.CopyTo.empty(),
-            new Explicit<>(true, true),
-            false,
-            false,
-            correlationParamsContext
-        );
-
-        ParseContext.Document document = new ParseContext.Document();
-        ContentPath contentPath = new ContentPath();
-        ParseContext parseContext = mock(ParseContext.class);
-        when(parseContext.doc()).thenReturn(document);
-        when(parseContext.path()).thenReturn(contentPath);
-
-        CorrelationVectorFieldMapper correlationVectorFieldMapper = Mockito.spy(new CorrelationVectorFieldMapper(input));
-        doReturn(Optional.of(testVector)).when(correlationVectorFieldMapper).getFloatsFromContext(parseContext, dimension);
-
-        correlationVectorFieldMapper.parseCreateField(parseContext, dimension);
-
-        List<IndexableField> fields = document.getFields();
-        assertEquals(1, fields.size());
-        IndexableField field = fields.get(0);
-
-        Assert.assertTrue(field instanceof KnnFloatVectorField);
-        KnnFloatVectorField knnFloatVectorField = (KnnFloatVectorField) field;
-        Assert.assertArrayEquals(testVector, knnFloatVectorField.vectorValue(), 0.001f);
-    }
-
-    /**
-     * test CorrelationVectorFieldType subclass
-     */
-    public void testCorrelationVectorFieldType() {
-        String fieldName = "test-field-name";
-        int dimension = 10;
-        QueryShardContext context = mock(QueryShardContext.class);
-        SearchLookup searchLookup = mock(SearchLookup.class);
-
-        VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType(
-            fieldName,
-            Map.of(),
-            dimension
-        );
-        Assert.assertThrows(QueryShardException.class, () -> { correlationVectorFieldType.termQuery(new Object(), context); });
-        Assert.assertThrows(
-            UnsupportedOperationException.class,
-            () -> { correlationVectorFieldType.valueFetcher(context, searchLookup, ""); }
-        );
-        Assert.assertTrue(correlationVectorFieldType.existsQuery(context) instanceof FieldExistsQuery);
-        Assert.assertEquals(VectorFieldMapper.CONTENT_TYPE, correlationVectorFieldType.typeName());
-    }
-
-    /**
-     * test constants in VectorFieldMapper
-     */
-    public void testVectorFieldMapperConstants() {
-        Assert.assertNotNull(VectorFieldMapper.Defaults.IGNORE_MALFORMED);
-        Assert.assertNotNull(VectorFieldMapper.Names.IGNORE_MALFORMED);
-    }
-
-    private IndexMetadata buildIndexMetaData(String index, Settings settings) {
-        return IndexMetadata.builder(index)
-            .settings(settings)
-            .numberOfShards(1)
-            .numberOfReplicas(0)
-            .version(7)
-            .mappingVersion(0)
-            .settingsVersion(0)
-            .aliasesVersion(0)
-            .creationDate(0)
-            .build();
-    }
-
-    private Mapper.TypeParser.ParserContext buildParserContext(String index, Settings settings) {
-        IndexSettings indexSettings = new IndexSettings(
-            buildIndexMetaData(index, settings),
-            Settings.EMPTY,
-            new IndexScopedSettings(Settings.EMPTY, new HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS))
-        );
-
-        MapperService mapperService = mock(MapperService.class);
-        when(mapperService.getIndexSettings()).thenReturn(indexSettings);
-
-        return new Mapper.TypeParser.ParserContext(
-            null,
-            mapperService,
-            type -> new VectorFieldMapper.TypeParser(),
-            Version.CURRENT,
-            null,
-            null,
-            null
-        );
-    }
-
-    private static float[] createInitializedFloatArray(int dimension, float value) {
-        float[] array = new float[dimension];
-        Arrays.fill(array, value);
-        return array;
-    }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java
deleted file mode 100644
index 3e567d0c04e53..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.core.index.query;
-
-import org.apache.lucene.search.KnnFloatVectorQuery;
-import org.opensearch.Version;
-import org.opensearch.cluster.ClusterModule;
-import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.json.JsonXContent;
-import org.opensearch.core.common.Strings;
-import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.index.Index;
-import org.opensearch.core.xcontent.MediaTypeRegistry;
-import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.index.mapper.NumberFieldMapper;
-import org.opensearch.index.query.QueryBuilder;
-import org.opensearch.index.query.QueryBuilders;
-import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.index.query.TermQueryBuilder;
-import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper;
-import org.opensearch.plugins.SearchPlugin;
-import org.opensearch.test.OpenSearchTestCase;
-import org.junit.Assert;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Optional;
-
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * Unit tests for Correlation Query Builder
- */
-public class CorrelationQueryBuilderTests extends OpenSearchTestCase {
-
-    private static final String FIELD_NAME = "myvector";
-    private static final int K = 1;
-    private static final TermQueryBuilder TERM_QUERY = QueryBuilders.termQuery("field", "value");
-    private static final float[] QUERY_VECTOR = new float[] { 1.0f, 2.0f, 3.0f, 4.0f };
-
-    /**
-     * test invalid number of nearby neighbors
-     */
-    public void testInvalidK() {
-        float[] queryVector = { 1.0f, 1.0f };
-
-        expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, -K));
-        expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, 0));
-        expectThrows(
-            IllegalArgumentException.class,
-            () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, CorrelationQueryBuilder.K_MAX + 1)
-        );
-    }
-
-    /**
-     * test empty vector scenario
-     */
-    public void testEmptyVector() {
-        final float[] queryVector = null;
-        expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, 1));
-        final float[] queryVector1 = new float[] {};
-        expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector1, 1));
-    }
-
-    /**
-     * test serde with xcontent
-     * @throws IOException IOException
-     */
-    public void testFromXContent() throws IOException {
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
-        XContentBuilder builder = XContentFactory.jsonBuilder();
-        builder.startObject();
-        builder.startObject(correlationQueryBuilder.fieldName());
-        builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector());
-        builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK());
-        builder.endObject();
-        builder.endObject();
-        XContentParser contentParser = createParser(builder);
-        contentParser.nextToken();
-        CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser);
-        Assert.assertEquals(actualBuilder, correlationQueryBuilder);
-    }
-
-    /**
-     * test serde with xcontent
-     * @throws IOException IOException
-     */
-    public void testFromXContentFromString() throws IOException {
-        String correlationQuery = "{\n"
-            + "    \"myvector\" : {\n"
-            + "      \"vector\" : [\n"
-            + "        1.0,\n"
-            + "        2.0,\n"
-            + "        3.0,\n"
-            + "        4.0\n"
-            + "      ],\n"
-            + "      \"k\" : 1,\n"
-            + "      \"boost\" : 1.0\n"
-            + "    }\n"
-            + "}";
-        XContentParser contentParser = createParser(JsonXContent.jsonXContent, correlationQuery);
-        contentParser.nextToken();
-        CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser);
-        Assert.assertEquals(correlationQuery.replace("\n", "").replace(" ", ""), Strings.toString(MediaTypeRegistry.JSON, actualBuilder));
-    }
-
-    /**
-     * test serde with xcontent with filters
-     * @throws IOException IOException
-     */
-    public void testFromXContentWithFilters() throws IOException {
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY);
-        XContentBuilder builder = XContentFactory.jsonBuilder();
-        builder.startObject();
-        builder.startObject(correlationQueryBuilder.fieldName());
-        builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector());
-        builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK());
-        builder.field(CorrelationQueryBuilder.FILTER_FIELD.getPreferredName(), correlationQueryBuilder.getFilter());
-        builder.endObject();
-        builder.endObject();
-        XContentParser contentParser = createParser(builder);
-        contentParser.nextToken();
-        CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser);
-        Assert.assertEquals(actualBuilder, correlationQueryBuilder);
-    }
-
-    /**
-     * test conversion o KnnFloatVectorQuery logic
-     * @throws IOException IOException
-     */
-    public void testDoToQuery() throws IOException {
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
-        Index dummyIndex = new Index("dummy", "dummy");
-        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
-        VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class);
-        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
-        when(mockCorrVectorField.getDimension()).thenReturn(4);
-        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
-        KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext);
-        Assert.assertEquals(FIELD_NAME, query.getField());
-        Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f);
-        Assert.assertEquals(K, query.getK());
-    }
-
-    /**
-     * test conversion o KnnFloatVectorQuery logic with filter
-     * @throws IOException IOException
-     */
-    public void testDoToQueryWithFilter() throws IOException {
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY);
-        Index dummyIndex = new Index("dummy", "dummy");
-        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
-        VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class);
-        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
-        when(mockCorrVectorField.getDimension()).thenReturn(4);
-        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
-        KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext);
-        Assert.assertEquals(FIELD_NAME, query.getField());
-        Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f);
-        Assert.assertEquals(K, query.getK());
-        Assert.assertEquals(TERM_QUERY.toQuery(mockQueryShardContext), query.getFilter());
-    }
-
-    /**
-     * test conversion o KnnFloatVectorQuery logic failure with invalid dimensions
-     */
-    public void testDoToQueryInvalidDimensions() {
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
-        Index dummyIndex = new Index("dummy", "dummy");
-        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
-        VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class);
-        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
-        when(mockCorrVectorField.getDimension()).thenReturn(400);
-        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
-        expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext));
-    }
-
-    /**
-     * test conversion o KnnFloatVectorQuery logic failure with invalid field type
-     */
-    public void testDoToQueryInvalidFieldType() {
-        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
-        Index dummyIndex = new Index("dummy", "dummy");
-        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
-        NumberFieldMapper.NumberFieldType mockCorrVectorField = mock(NumberFieldMapper.NumberFieldType.class);
-        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
-        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
-        expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext));
-    }
-
-    /**
-     * test serialization of Correlation Query Builder
-     * @throws Exception throws an IOException if serialization fails
-     * @throws Exception Exception
-     */
-    public void testSerialization() throws Exception {
-        assertSerialization(Optional.empty());
-        assertSerialization(Optional.of(TERM_QUERY));
-    }
-
-    private void assertSerialization(final Optional<QueryBuilder> queryBuilderOptional) throws IOException {
-        final CorrelationQueryBuilder builder = queryBuilderOptional.isPresent()
-            ? new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, queryBuilderOptional.get())
-            : new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
-
-        try (BytesStreamOutput output = new BytesStreamOutput()) {
-            output.setVersion(Version.CURRENT);
-            output.writeNamedWriteable(builder);
-
-            try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) {
-                in.setVersion(Version.CURRENT);
-                final QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class);
-
-                assertNotNull(deserializedQuery);
-                assertTrue(deserializedQuery instanceof CorrelationQueryBuilder);
-                final CorrelationQueryBuilder deserializedKnnQueryBuilder = (CorrelationQueryBuilder) deserializedQuery;
-                assertEquals(FIELD_NAME, deserializedKnnQueryBuilder.fieldName());
-                assertArrayEquals(QUERY_VECTOR, (float[]) deserializedKnnQueryBuilder.vector(), 0.0f);
-                assertEquals(K, deserializedKnnQueryBuilder.getK());
-                if (queryBuilderOptional.isPresent()) {
-                    assertNotNull(deserializedKnnQueryBuilder.getFilter());
-                    assertEquals(queryBuilderOptional.get(), deserializedKnnQueryBuilder.getFilter());
-                } else {
-                    assertNull(deserializedKnnQueryBuilder.getFilter());
-                }
-            }
-        }
-    }
-
-    @Override
-    protected NamedXContentRegistry xContentRegistry() {
-        List<NamedXContentRegistry.Entry> list = ClusterModule.getNamedXWriteables();
-        SearchPlugin.QuerySpec<?> spec = new SearchPlugin.QuerySpec<>(
-            TermQueryBuilder.NAME,
-            TermQueryBuilder::new,
-            TermQueryBuilder::fromXContent
-        );
-        list.add(new NamedXContentRegistry.Entry(QueryBuilder.class, spec.getName(), (p, c) -> spec.getParser().fromXContent(p)));
-        NamedXContentRegistry registry = new NamedXContentRegistry(list);
-        return registry;
-    }
-
-    @Override
-    protected NamedWriteableRegistry writableRegistry() {
-        final List<NamedWriteableRegistry.Entry> entries = ClusterModule.getNamedWriteables();
-        entries.add(
-            new NamedWriteableRegistry.Entry(
-                QueryBuilder.class,
-                CorrelationQueryBuilder.NAME_FIELD.getPreferredName(),
-                CorrelationQueryBuilder::new
-            )
-        );
-        entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new));
-        return new NamedWriteableRegistry(entries);
-    }
-}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java
deleted file mode 100644
index 45cb47b05b5c2..0000000000000
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.plugin.correlation.settings;
-
-import org.opensearch.common.settings.Setting;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.TimeValue;
-import org.opensearch.plugin.correlation.EventsCorrelationPlugin;
-import org.opensearch.test.OpenSearchTestCase;
-import org.junit.Assert;
-import org.junit.Before;
-
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Unit tests for Correlation Engine settings
- */
-public class EventsCorrelationSettingsTests extends OpenSearchTestCase {
-
-    private EventsCorrelationPlugin plugin;
-
-    @Before
-    public void setup() {
-        plugin = new EventsCorrelationPlugin();
-    }
-
-    /**
-     * test all plugin settings returned
-     */
-    public void testAllPluginSettingsReturned() {
-        List<Object> expectedSettings = List.of(
-            EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING,
-            EventsCorrelationSettings.CORRELATION_TIME_WINDOW
-        );
-
-        List<Setting<?>> settings = plugin.getSettings();
-        Assert.assertTrue(settings.containsAll(expectedSettings));
-    }
-
-    /**
-     * test settings get value
-     */
-    public void testSettingsGetValue() {
-        Settings settings = Settings.builder().put("index.correlation", true).build();
-        Assert.assertEquals(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING.get(settings), true);
-        settings = Settings.builder()
-            .put("plugins.security_analytics.correlation_time_window", new TimeValue(10, TimeUnit.MINUTES))
-            .build();
-        Assert.assertEquals(EventsCorrelationSettings.CORRELATION_TIME_WINDOW.get(settings), new TimeValue(10, TimeUnit.MINUTES));
-    }
-}

From 7050ecf21c8dcecc663f95a0ca68bfa91a3f9030 Mon Sep 17 00:00:00 2001
From: kkewwei <kewei.11@bytedance.com>
Date: Fri, 20 Dec 2024 05:57:34 +0800
Subject: [PATCH 05/61] Fix Flaky Test SearchTimeoutIT.testSimpleTimeout
 (#16828)

Signed-off-by: kkewwei <kkewwei@163.com>
---
 .../java/org/opensearch/search/SearchTimeoutIT.java            | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
index ef7da395d2151..79caef1f45a26 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
@@ -82,8 +82,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
     }
 
     public void testSimpleTimeout() throws Exception {
-        final int numDocs = 1000;
-        for (int i = 0; i < numDocs; i++) {
+        for (int i = 0; i < 32; i++) {
             client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get();
         }
         refresh("test");

From 45d8b0a06bb51b9392228266232ca5b9c71bb483 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Dec 2024 09:28:31 -0500
Subject: [PATCH 06/61] Bump com.nimbusds:oauth2-oidc-sdk from 11.19.1 to
 11.20.1 in /plugins/repository-azure (#16895)

* Bump com.nimbusds:oauth2-oidc-sdk in /plugins/repository-azure

Bumps [com.nimbusds:oauth2-oidc-sdk](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions) from 11.19.1 to 11.20.1.
- [Changelog](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/src/master/CHANGELOG.txt)
- [Commits](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/branches/compare/11.20.1..11.19.1)

---
updated-dependencies:
- dependency-name: com.nimbusds:oauth2-oidc-sdk
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Updating SHAs

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                                                    | 1 +
 plugins/repository-azure/build.gradle                           | 2 +-
 .../repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1  | 1 -
 .../repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1  | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9f439e48ecab7..c13c6c59d8a49 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855))
 - Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857))
 - Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856))
+- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index aebdd3e3b7f63..543b374776c95 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -62,7 +62,7 @@ dependencies {
   api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
   api "net.java.dev.jna:jna-platform:${versions.jna}"
   api 'com.microsoft.azure:msal4j:1.17.2'
-  api 'com.nimbusds:oauth2-oidc-sdk:11.19.1'
+  api 'com.nimbusds:oauth2-oidc-sdk:11.20.1'
   api 'com.nimbusds:nimbus-jose-jwt:9.41.1'
   api 'com.nimbusds:content-type:2.3'
   api 'com.nimbusds:lang-tag:1.7'
diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1
deleted file mode 100644
index 7d83b0e8ca639..0000000000000
--- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-58db85a807a56ae76baffa519772271ad5808195
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1
new file mode 100644
index 0000000000000..7527d31eb1d37
--- /dev/null
+++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1
@@ -0,0 +1 @@
+8d1ecd62d31945534a7cd63062c3c48ff0df9c43
\ No newline at end of file

From 05ab37d931e170364af04848f85347b928200d71 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Dec 2024 10:51:08 -0500
Subject: [PATCH 07/61] Bump com.netflix.nebula.ospackage-base from 11.10.0 to
 11.10.1 in /distribution/packages (#16896)

* Bump com.netflix.nebula.ospackage-base in /distribution/packages

Bumps com.netflix.nebula.ospackage-base from 11.10.0 to 11.10.1.

---
updated-dependencies:
- dependency-name: com.netflix.nebula.ospackage-base
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                       | 1 +
 distribution/packages/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c13c6c59d8a49..35f48e30aecb8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857))
 - Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856))
 - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895))
+- Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 25af649bb4aed..e1fa4de5a0caa 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -63,7 +63,7 @@ import java.util.regex.Pattern
  */
 
 plugins {
-  id "com.netflix.nebula.ospackage-base" version "11.10.0"
+  id "com.netflix.nebula.ospackage-base" version "11.10.1"
 }
 
 void addProcessFilesTask(String type, boolean jdk) {

From 38a4112bd119d4e1d51367abe55d0fbad7a38bf8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Dec 2024 13:38:52 -0500
Subject: [PATCH 08/61] Bump ch.qos.logback:logback-classic from 1.5.12 to
 1.5.15 in /test/fixtures/hdfs-fixture (#16898)

* Bump ch.qos.logback:logback-classic in /test/fixtures/hdfs-fixture

Bumps [ch.qos.logback:logback-classic](https://github.com/qos-ch/logback) from 1.5.12 to 1.5.15.
- [Commits](https://github.com/qos-ch/logback/compare/v_1.5.12...v_1.5.15)

---
updated-dependencies:
- dependency-name: ch.qos.logback:logback-classic
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                            | 2 +-
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 35f48e30aecb8..bd57d918eb390 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,7 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `codecov/codecov-action` from 4 to 5 ([#16667](https://github.com/opensearch-project/OpenSearch/pull/16667))
 - Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.3 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718), [#16858](https://github.com/opensearch-project/OpenSearch/pull/16858))
 - Bump `jackson` from 2.17.2 to 2.18.2 ([#16733](https://github.com/opensearch-project/OpenSearch/pull/16733))
-- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.12 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716))
+- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.15 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716), [#16898](https://github.com/opensearch-project/OpenSearch/pull/16898))
 - Bump `com.azure:azure-identity` from 1.13.2 to 1.14.2 ([#16778](https://github.com/opensearch-project/OpenSearch/pull/16778))
 - Bump Apache Lucene from 9.12.0 to 9.12.1 ([#16846](https://github.com/opensearch-project/OpenSearch/pull/16846))
 - Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855))
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index d645171d9f8e4..06b35a2622cf4 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -75,7 +75,7 @@ dependencies {
   api "org.apache.commons:commons-text:1.12.0"
   api "commons-net:commons-net:3.11.1"
   api "ch.qos.logback:logback-core:1.5.12"
-  api "ch.qos.logback:logback-classic:1.5.12"
+  api "ch.qos.logback:logback-classic:1.5.15"
   api "org.jboss.xnio:xnio-nio:3.8.16.Final"
   api 'org.jline:jline:3.28.0'
   api 'org.apache.commons:commons-configuration2:2.11.0'

From 6b41e4f4ca5ebdf606470c517fe551ab6659d4e6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Dec 2024 16:20:09 -0500
Subject: [PATCH 09/61] Bump lycheeverse/lychee-action from 2.1.0 to 2.2.0
 (#16897)

* Bump lycheeverse/lychee-action from 2.1.0 to 2.2.0

Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 2.1.0 to 2.2.0.
- [Release notes](https://github.com/lycheeverse/lychee-action/releases)
- [Commits](https://github.com/lycheeverse/lychee-action/compare/v2.1.0...v2.2.0)

---
updated-dependencies:
- dependency-name: lycheeverse/lychee-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/links.yml | 2 +-
 CHANGELOG.md                | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 3697750dab97a..923c82028cd1b 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -13,7 +13,7 @@ jobs:
       - uses: actions/checkout@v4
       - name: lychee Link Checker
         id: lychee
-        uses: lycheeverse/lychee-action@v2.1.0
+        uses: lycheeverse/lychee-action@v2.2.0
         with:
           args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes
           fail: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd57d918eb390..44236c4bc25c0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,7 +34,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550))
 - Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.3.0 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612), [#16854](https://github.com/opensearch-project/OpenSearch/pull/16854))
 - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.47 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807))
-- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610))
+- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897))
 - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614))
 - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655))
 - Bump `Netty` from 4.1.114.Final to 4.1.115.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661))

From 54ae54a2fc7b2131285e0405186600ea5fa4b220 Mon Sep 17 00:00:00 2001
From: Finn <carrofin@amazon.com>
Date: Tue, 24 Dec 2024 06:29:30 -0800
Subject: [PATCH 10/61] Create sub directories for ThirdPartyAudit dependency
 metadata (#16844)

* Extract jars to sub dirs during thirdPartyAudit task.

Signed-off-by: Finn Carroll <carrofin@amazon.com>

* Change regex to split on '-'/'.'. Ignore version.

Signed-off-by: Finn Carroll <carrofin@amazon.com>

* Split on .jar for sub folder prefix.

Signed-off-by: Finn Carroll <carrofin@amazon.com>

---------

Signed-off-by: Finn Carroll <carrofin@amazon.com>
---
 .../gradle/precommit/ThirdPartyAuditTask.java | 33 ++++++++++++++-----
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
index a74781ac44720..6842f0e541abe 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -229,8 +229,7 @@ public Set<File> getJarsToScan() {
     @TaskAction
     public void runThirdPartyAudit() throws IOException {
         Set<File> jars = getJarsToScan();
-
-        extractJars(jars);
+        Set<File> extractedJars = extractJars(jars);
 
         final String forbiddenApisOutput = runForbiddenAPIsCli();
 
@@ -248,7 +247,7 @@ public void runThirdPartyAudit() throws IOException {
 
         Set<String> jdkJarHellClasses = null;
         if (this.jarHellEnabled) {
-            jdkJarHellClasses = runJdkJarHellCheck();
+            jdkJarHellClasses = runJdkJarHellCheck(extractedJars);
         }
 
         if (missingClassExcludes != null) {
@@ -301,16 +300,26 @@ private void logForbiddenAPIsOutput(String forbiddenApisOutput) {
         getLogger().error("Forbidden APIs output:\n{}==end of forbidden APIs==", forbiddenApisOutput);
     }
 
-    private void extractJars(Set<File> jars) {
+    /**
+     * Extract project jars to build directory as specified by getJarExpandDir.
+     * Handle multi release jars by keeping versions closest to `targetCompatibility` version.
+     * @param jars to extract to build dir
+     * @return File set of extracted jars
+     */
+    private Set<File> extractJars(Set<File> jars) {
+        Set<File> extractedJars = new TreeSet<>();
         File jarExpandDir = getJarExpandDir();
         // We need to clean up to make sure old dependencies don't linger
         getProject().delete(jarExpandDir);
 
         jars.forEach(jar -> {
+            String jarPrefix = jar.getName().replace(".jar", "");
+            File jarSubDir = new File(jarExpandDir, jarPrefix);
+            extractedJars.add(jarSubDir);
             FileTree jarFiles = getProject().zipTree(jar);
             getProject().copy(spec -> {
                 spec.from(jarFiles);
-                spec.into(jarExpandDir);
+                spec.into(jarSubDir);
                 // exclude classes from multi release jars
                 spec.exclude("META-INF/versions/**");
             });
@@ -329,7 +338,7 @@ private void extractJars(Set<File> jars) {
                 Integer.parseInt(targetCompatibility.get().getMajorVersion())
             ).forEach(majorVersion -> getProject().copy(spec -> {
                 spec.from(getProject().zipTree(jar));
-                spec.into(jarExpandDir);
+                spec.into(jarSubDir);
                 String metaInfPrefix = "META-INF/versions/" + majorVersion;
                 spec.include(metaInfPrefix + "/**");
                 // Drop the version specific prefix
@@ -337,6 +346,8 @@ private void extractJars(Set<File> jars) {
                 spec.setIncludeEmptyDirs(false);
             }));
         });
+
+        return extractedJars;
     }
 
     private void assertNoJarHell(Set<String> jdkJarHellClasses) {
@@ -398,7 +409,12 @@ private String runForbiddenAPIsCli() throws IOException {
         return forbiddenApisOutput;
     }
 
-    private Set<String> runJdkJarHellCheck() throws IOException {
+    /**
+     * Execute java with JDK_JAR_HELL_MAIN_CLASS against provided jars with OpenSearch core in the classpath.
+     * @param jars to scan for jarHell violations.
+     * @return standard out of jarHell process.
+     */
+    private Set<String> runJdkJarHellCheck(Set<File> jars) throws IOException {
         ByteArrayOutputStream standardOut = new ByteArrayOutputStream();
         InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class);
         ExecResult execResult = execOps.getExecOps().javaexec(spec -> {
@@ -407,9 +423,8 @@ private Set<String> runJdkJarHellCheck() throws IOException {
                 getRuntimeConfiguration(),
                 getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
             );
-
             spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS);
-            spec.args(getJarExpandDir());
+            spec.args(jars);
             spec.setIgnoreExitValue(true);
             if (javaHome != null) {
                 spec.setExecutable(javaHome + "/bin/java");

From 8ea0c80e90caee8fdbe161c9483ea7a6467b1b63 Mon Sep 17 00:00:00 2001
From: kkewwei <kewei.11@bytedance.com>
Date: Sat, 28 Dec 2024 07:29:45 +0800
Subject: [PATCH 11/61] Retrieve value from DocValues in a flat_object field
 (#16802)

---
 CHANGELOG.md                                  |  1 +
 .../92_flat_object_support_doc_values.yml     | 52 ++++++++++++++-
 .../search/fields/SearchFieldsIT.java         | 22 +++----
 .../index/mapper/DocValueFetcher.java         |  6 +-
 .../index/mapper/FlatObjectFieldMapper.java   | 63 ++++++++++++++++++-
 .../mapper/FlatObjectFieldMapperTests.java    | 22 +++++++
 6 files changed, 150 insertions(+), 16 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 44236c4bc25c0..a503728a44fda 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)).
 - Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678))
 - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/))
+- Added ability to retrieve value from DocValues in a flat_object field ([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
index 9ec39660a4928..c840276ee1157 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
@@ -1,8 +1,9 @@
 ---
 # The test setup includes:
-# - Create flat_object mapping for flat_object_doc_values_test index
-# - Index 9 example documents
-# - Search tests about doc_values and index
+# - 1. Create flat_object mapping for flat_object_doc_values_test index
+# - 2. Index 9 example documents
+# - 3. Search tests about doc_values and index
+# - 4. Fetch doc_value from flat_object field
 
 setup:
   - skip:
@@ -786,3 +787,48 @@ teardown:
 
   - length: { hits.hits: 1 }
   - match: { hits.hits.0._source.order: "order8" }
+
+  # Fetch doc_values with the exact dot path.
+  - do:
+      search:
+        body: {
+          _source: false,
+          query: {
+            bool: {
+              must: [
+                {
+                  term: {
+                    order: "order0"
+                  }
+                }
+              ]
+            }
+          },
+          stored_fields: "_none_",
+          docvalue_fields: [ "issue.labels.name","order" ]
+        }
+
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields: { "order" : [ "order0" ], "issue.labels.name": [ "abc0" ] } }
+
+  - do:
+      search:
+        body: {
+          _source: false,
+          query: {
+            bool: {
+              must: [
+                {
+                  term: {
+                    order: "order0"
+                  }
+                }
+              ]
+            }
+          },
+          stored_fields: "_none_",
+          docvalue_fields: [ "issue.labels.name" ]
+        }
+
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields: { "issue.labels.name": [ "abc0" ] } }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java
index 2ce96092203e8..60a6e59014e11 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java
@@ -1023,7 +1023,7 @@ public void testDocValueFields() throws Exception {
             .startObject("ip_field")
             .field("type", "ip")
             .endObject()
-            .startObject("flat_object_field")
+            .startObject("flat_object_field1")
             .field("type", "flat_object")
             .endObject()
             .endObject()
@@ -1050,9 +1050,11 @@ public void testDocValueFields() throws Exception {
                     .field("boolean_field", true)
                     .field("binary_field", new byte[] { 42, 100 })
                     .field("ip_field", "::1")
-                    .field("flat_object_field")
+                    .field("flat_object_field1")
                     .startObject()
+                    .field("fooa", "bara")
                     .field("foo", "bar")
+                    .field("foob", "barb")
                     .endObject()
                     .endObject()
             )
@@ -1075,7 +1077,7 @@ public void testDocValueFields() throws Exception {
             .addDocValueField("boolean_field")
             .addDocValueField("binary_field")
             .addDocValueField("ip_field")
-            .addDocValueField("flat_object_field");
+            .addDocValueField("flat_object_field1.foo");
         SearchResponse searchResponse = builder.get();
 
         assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
@@ -1097,7 +1099,7 @@ public void testDocValueFields() throws Exception {
                     "keyword_field",
                     "binary_field",
                     "ip_field",
-                    "flat_object_field"
+                    "flat_object_field1.foo"
                 )
             )
         );
@@ -1116,7 +1118,7 @@ public void testDocValueFields() throws Exception {
         assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1"));
-        assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo"));
+        assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field1.foo").getValue(), equalTo("bar"));
 
         builder = client().prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field");
         searchResponse = builder.get();
@@ -1139,8 +1141,7 @@ public void testDocValueFields() throws Exception {
                     "text_field",
                     "keyword_field",
                     "binary_field",
-                    "ip_field",
-                    "flat_object_field"
+                    "ip_field"
                 )
             )
         );
@@ -1160,7 +1161,6 @@ public void testDocValueFields() throws Exception {
         assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1"));
-        assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo"));
 
         builder = client().prepareSearch()
             .setQuery(matchAllQuery())
@@ -1176,7 +1176,7 @@ public void testDocValueFields() throws Exception {
             .addDocValueField("boolean_field", "use_field_mapping")
             .addDocValueField("binary_field", "use_field_mapping")
             .addDocValueField("ip_field", "use_field_mapping")
-            .addDocValueField("flat_object_field", "use_field_mapping");
+            .addDocValueField("flat_object_field1.foo", null);
         ;
         searchResponse = builder.get();
 
@@ -1199,7 +1199,7 @@ public void testDocValueFields() throws Exception {
                     "keyword_field",
                     "binary_field",
                     "ip_field",
-                    "flat_object_field"
+                    "flat_object_field1.foo"
                 )
             )
         );
@@ -1219,7 +1219,7 @@ public void testDocValueFields() throws Exception {
         assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1"));
-        assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo"));
+        assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field1.foo").getValue(), equalTo("bar"));
 
         builder = client().prepareSearch()
             .setQuery(matchAllQuery())
diff --git a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java
index 827792cdb1091..48da9b30ac1b0 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java
@@ -43,6 +43,7 @@
 import java.util.List;
 
 import static java.util.Collections.emptyList;
+import static org.opensearch.index.mapper.FlatObjectFieldMapper.DOC_VALUE_NO_MATCH;
 
 /**
  * Value fetcher that loads from doc values.
@@ -70,7 +71,10 @@ public List<Object> fetchValues(SourceLookup lookup) throws IOException {
         }
         List<Object> result = new ArrayList<Object>(leaf.docValueCount());
         for (int i = 0, count = leaf.docValueCount(); i < count; ++i) {
-            result.add(leaf.nextValue());
+            Object value = leaf.nextValue();
+            if (value != DOC_VALUE_NO_MATCH) {
+                result.add(value);
+            }
         }
         return result;
     }
diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
index 0ccdb40f9d33a..13063a4761006 100644
--- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
@@ -28,6 +28,7 @@
 import org.opensearch.common.unit.Fuzziness;
 import org.opensearch.common.xcontent.JsonToStringXContentParser;
 import org.opensearch.core.common.ParsingException;
+import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.DeprecationHandler;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentParser;
@@ -36,11 +37,13 @@
 import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData;
 import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType;
 import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.aggregations.support.CoreValuesSourceType;
 import org.opensearch.search.lookup.SearchLookup;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
+import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
@@ -63,6 +66,7 @@
 public final class FlatObjectFieldMapper extends DynamicKeyFieldMapper {
 
     public static final String CONTENT_TYPE = "flat_object";
+    public static final Object DOC_VALUE_NO_MATCH = new Object();
 
     /**
      * In flat_object field mapper, field type is similar to keyword field type
@@ -272,7 +276,7 @@ NamedAnalyzer normalizer() {
         @Override
         public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier<SearchLookup> searchLookup) {
             failIfNoDocValues();
-            return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.BYTES);
+            return new SortedSetOrdinalsIndexFieldData.Builder(valueFieldType().name(), CoreValuesSourceType.BYTES);
         }
 
         @Override
@@ -304,6 +308,30 @@ protected String parseSourceValue(Object value) {
             };
         }
 
+        @Override
+        public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) {
+            if (format != null) {
+                throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats");
+            }
+            if (timeZone != null) {
+                throw new IllegalArgumentException(
+                    "Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"
+                );
+            }
+            if (mappedFieldTypeName != null) {
+                return new FlatObjectDocValueFormat(mappedFieldTypeName + DOT_SYMBOL + name() + EQUAL_SYMBOL);
+            } else {
+                throw new IllegalArgumentException(
+                    "Field [" + name() + "] of type [" + typeName() + "] does not support doc_value in root field"
+                );
+            }
+        }
+
+        @Override
+        public boolean isAggregatable() {
+            return false;
+        }
+
         @Override
         public Object valueForDisplay(Object value) {
             if (value == null) {
@@ -530,6 +558,39 @@ public Query wildcardQuery(
             return valueFieldType().wildcardQuery(rewriteValue(value), method, caseInsensitve, context);
         }
 
+        /**
+         * A doc_value formatter for flat_object field.
+         */
+        public class FlatObjectDocValueFormat implements DocValueFormat {
+            private static final String NAME = "flat_object";
+            private final String prefix;
+
+            public FlatObjectDocValueFormat(String prefix) {
+                this.prefix = prefix;
+            }
+
+            @Override
+            public String getWriteableName() {
+                return NAME;
+            }
+
+            @Override
+            public void writeTo(StreamOutput out) {}
+
+            @Override
+            public Object format(BytesRef value) {
+                String parsedValue = inputToString(value);
+                if (parsedValue.startsWith(prefix) == false) {
+                    return DOC_VALUE_NO_MATCH;
+                }
+                return parsedValue.substring(prefix.length());
+            }
+
+            @Override
+            public BytesRef parseBytesRef(String value) {
+                return new BytesRef((String) valueFieldType.rewriteForDocValue(rewriteValue(value)));
+            }
+        }
     }
 
     private final ValueFieldMapper valueFieldMapper;
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
index afd9e994ce3ae..7e6aa00c87290 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
@@ -21,6 +21,7 @@
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.search.DocValueFormat;
 
 import java.io.IOException;
 
@@ -397,6 +398,27 @@ public void testDeduplicationValue() throws IOException {
         assertEquals(new BytesRef("field.labels=3"), fieldValueAndPaths[4].binaryValue());
     }
 
+    public void testFetchDocValues() throws IOException {
+        MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "flat_object")));
+        {
+            // test valueWithPathField
+            MappedFieldType ft = mapperService.fieldType("field.name");
+            DocValueFormat format = ft.docValueFormat(null, null);
+            String storedValue = "field.field.name=1234";
+
+            Object object = format.format(new BytesRef(storedValue));
+            assertEquals("1234", object);
+        }
+
+        {
+            // test valueField
+            MappedFieldType ft = mapperService.fieldType("field");
+            Throwable throwable = assertThrows(IllegalArgumentException.class, () -> ft.docValueFormat(null, null));
+            assertEquals("Field [field] of type [flat_object] does not support doc_value in root field", throwable.getMessage());
+        }
+
+    }
+
     @Override
     protected void registerParameters(ParameterChecker checker) throws IOException {
         // In the future we will want to make sure parameter updates are covered.

From 1352bbf0e50a590e02759a41e000e656a2c7203c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 30 Dec 2024 10:14:30 -0500
Subject: [PATCH 12/61] Bump com.microsoft.azure:msal4j from 1.17.2 to 1.18.0
 in /plugins/repository-azure (#16918)

* Bump com.microsoft.azure:msal4j in /plugins/repository-azure

Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.17.2 to 1.18.0.
- [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases)
- [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt)
- [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/compare/v1.17.2...v1.18.0)

---
updated-dependencies:
- dependency-name: com.microsoft.azure:msal4j
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Updating SHAs

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                                             | 1 +
 plugins/repository-azure/build.gradle                    | 2 +-
 plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 | 1 -
 plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a503728a44fda..3f0eed5f5bbc8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,6 +51,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856))
 - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895))
 - Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896))
+- Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 543b374776c95..03ea07623dbaf 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -61,7 +61,7 @@ dependencies {
   // Start of transitive dependencies for azure-identity
   api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
   api "net.java.dev.jna:jna-platform:${versions.jna}"
-  api 'com.microsoft.azure:msal4j:1.17.2'
+  api 'com.microsoft.azure:msal4j:1.18.0'
   api 'com.nimbusds:oauth2-oidc-sdk:11.20.1'
   api 'com.nimbusds:nimbus-jose-jwt:9.41.1'
   api 'com.nimbusds:content-type:2.3'
diff --git a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1
deleted file mode 100644
index b5219ee17e9fa..0000000000000
--- a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6211e3d71d0388929babaa0ff0951b30d001852
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1
new file mode 100644
index 0000000000000..292259e9d862d
--- /dev/null
+++ b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1
@@ -0,0 +1 @@
+a47e4e9257a5d9cdb8282c331278492968e06250
\ No newline at end of file

From 7ae66d0d63308b009927647ce0a892231f1fad29 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 30 Dec 2024 14:20:44 -0500
Subject: [PATCH 13/61] Bump org.apache.commons:commons-text from 1.12.0 to
 1.13.0 in /test/fixtures/hdfs-fixture (#16919)

* Bump org.apache.commons:commons-text in /test/fixtures/hdfs-fixture

Bumps org.apache.commons:commons-text from 1.12.0 to 1.13.0.

---
updated-dependencies:
- dependency-name: org.apache.commons:commons-text
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                            | 1 +
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3f0eed5f5bbc8..e9e57a09704b0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -52,6 +52,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895))
 - Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896))
 - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918))
+- Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 06b35a2622cf4..02aab575bbaf0 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -72,7 +72,7 @@ dependencies {
   api "org.eclipse.jetty:jetty-server:${versions.jetty}"
   api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}"
   api 'org.apache.zookeeper:zookeeper:3.9.3'
-  api "org.apache.commons:commons-text:1.12.0"
+  api "org.apache.commons:commons-text:1.13.0"
   api "commons-net:commons-net:3.11.1"
   api "ch.qos.logback:logback-core:1.5.12"
   api "ch.qos.logback:logback-classic:1.5.15"

From 7a0e8fbed7116173e11f52236c347affd4e22a16 Mon Sep 17 00:00:00 2001
From: Finn <carrofin@amazon.com>
Date: Tue, 31 Dec 2024 10:00:58 -0800
Subject: [PATCH 14/61] Add gRPC server as transport-grpc plugin (#16534)

Introduce auxiliary transport to NetworkPlugin and add gRPC plugin.

Auxiliary transports are optional lifecycle components provided by
network plugins which run in parallel to the http server/native
transport. They are distinct from the existing NetworkPlugin
interfaces of 'getTransports' and 'getHttpTransports' as auxiliary
transports are optional. Each AuxTransport implements its own
'aux.transport.type' and 'aux.transport.<type>.ports' settings. Since
Security.java initializes prior to Node.java during bootstrap,
socket binding permissions are granted based on
'aux.transport.<type>.ports' for each enabled 'aux.transport.type',
falling back to a default if no ports are specified.
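
A hedged illustration of the setting scheme described above (the key names
follow this message; the concrete type string registered by GrpcPlugin, and
whether the type setting takes a single value or a list, are assumptions not
confirmed here):

    import org.opensearch.common.settings.Settings;

    public class AuxTransportSettingsSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                // enable a hypothetical auxiliary transport type
                .put("aux.transport.type", "my-aux")
                // ports Security.java grants socket binding permission for;
                // a default range applies when this is omitted
                .put("aux.transport.my-aux.ports", "9400-9500")
                .build();
            System.out.println(settings.get("aux.transport.type"));
        }
    }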

Signed-off-by: Finn Carroll <carrofin@amazon.com>
---
 CHANGELOG.md                                  |   1 +
 gradle/libs.versions.toml                     |   2 +-
 .../licenses/grpc-api-1.68.0.jar.sha1         |   1 -
 .../licenses/grpc-api-1.68.2.jar.sha1         |   1 +
 .../licenses/grpc-api-1.68.0.jar.sha1         |   1 -
 .../licenses/grpc-api-1.68.2.jar.sha1         |   1 +
 plugins/transport-grpc/build.gradle           | 168 +++++++++++
 .../error_prone_annotations-2.24.1.jar.sha1   |   1 +
 .../error_prone_annotations-LICENSE.txt       | 202 +++++++++++++
 .../error_prone_annotations-NOTICE.txt        |   0
 .../licenses/failureaccess-1.0.1.jar.sha1     |   1 +
 .../licenses/failureaccess-LICENSE.txt        | 202 +++++++++++++
 .../licenses/failureaccess-NOTICE.txt         |   0
 .../transport-grpc/licenses/grpc-LICENSE.txt  | 202 +++++++++++++
 .../transport-grpc/licenses/grpc-NOTICE.txt   |  62 ++++
 .../licenses/grpc-api-1.68.2.jar.sha1         |   1 +
 .../licenses/grpc-core-1.68.2.jar.sha1        |   1 +
 .../grpc-netty-shaded-1.68.2.jar.sha1         |   1 +
 .../licenses/grpc-protobuf-1.68.2.jar.sha1    |   1 +
 .../grpc-protobuf-lite-1.68.2.jar.sha1        |   1 +
 .../licenses/grpc-services-1.68.2.jar.sha1    |   1 +
 .../licenses/grpc-stub-1.68.2.jar.sha1        |   1 +
 .../licenses/grpc-util-1.68.2.jar.sha1        |   1 +
 .../licenses/guava-33.2.1-jre.jar.sha1        |   1 +
 .../transport-grpc/licenses/guava-LICENSE.txt | 202 +++++++++++++
 .../transport-grpc/licenses/guava-NOTICE.txt  |   0
 .../licenses/perfmark-api-0.26.0.jar.sha1     |   1 +
 .../licenses/perfmark-api-LICENSE.txt         | 201 +++++++++++++
 .../licenses/perfmark-api-NOTICE.txt          |  40 +++
 .../opensearch/transport/grpc/GrpcPlugin.java |  69 +++++
 .../grpc/Netty4GrpcServerTransport.java       | 277 ++++++++++++++++++
 .../transport/grpc/package-info.java          |  13 +
 .../plugin-metadata/plugin-security.policy    |  18 ++
 .../grpc/Netty4GrpcServerTransportTests.java  |  49 ++++
 .../org/opensearch/bootstrap/Security.java    |  29 ++
 .../common/network/NetworkModule.java         |  42 +++
 .../common/settings/ClusterSettings.java      |   2 +
 .../http/AbstractHttpServerTransport.java     |  63 ++--
 .../main/java/org/opensearch/node/Node.java   |   7 +
 .../org/opensearch/plugins/NetworkPlugin.java |  51 ++++
 .../opensearch/transport/TcpTransport.java    |  38 +--
 .../org/opensearch/transport/Transport.java   |  50 ++++
 .../AbstractHttpServerTransportTests.java     |  38 +--
 .../transport/PublishPortTests.java           |  38 +--
 44 files changed, 1958 insertions(+), 124 deletions(-)
 delete mode 100644 plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1
 create mode 100644 plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1
 delete mode 100644 plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/build.gradle
 create mode 100644 plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt
 create mode 100644 plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt
 create mode 100644 plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/failureaccess-LICENSE.txt
 create mode 100644 plugins/transport-grpc/licenses/failureaccess-NOTICE.txt
 create mode 100644 plugins/transport-grpc/licenses/grpc-LICENSE.txt
 create mode 100644 plugins/transport-grpc/licenses/grpc-NOTICE.txt
 create mode 100644 plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/guava-LICENSE.txt
 create mode 100644 plugins/transport-grpc/licenses/guava-NOTICE.txt
 create mode 100644 plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1
 create mode 100644 plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt
 create mode 100644 plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt
 create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
 create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
 create mode 100644 plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
 create mode 100644 plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
 create mode 100644 plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e9e57a09704b0..a7f99722dd584 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678))
 - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/))
 - Added ability to retrieve value from DocValues in a flat_object field ([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802))
+- Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index ff0920f9d6057..f357fb248520c 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -27,7 +27,7 @@ google_http_client = "1.44.1"
 google_auth       = "1.29.0"
 tdigest           = "3.3"
 hdrhistogram      = "2.2.2"
-grpc              = "1.68.0"
+grpc              = "1.68.2"
 
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna               = "5.13.0"
diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1
deleted file mode 100644
index bf45716c5b8ce..0000000000000
--- a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08
\ No newline at end of file
diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..1844172dec982
--- /dev/null
+++ b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1
@@ -0,0 +1 @@
+a257a5dd25dda1c97a99b56d5b9c1e56c12ae554
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1
deleted file mode 100644
index bf45716c5b8ce..0000000000000
--- a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..1844172dec982
--- /dev/null
+++ b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1
@@ -0,0 +1 @@
+a257a5dd25dda1c97a99b56d5b9c1e56c12ae554
\ No newline at end of file
diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle
new file mode 100644
index 0000000000000..47f62b2b8c3f3
--- /dev/null
+++ b/plugins/transport-grpc/build.gradle
@@ -0,0 +1,168 @@
+import org.gradle.api.attributes.java.TargetJvmEnvironment
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+opensearchplugin {
+  description 'gRPC based transport implementation'
+  classname 'org.opensearch.transport.grpc.GrpcPlugin'
+}
+
+dependencies {
+  compileOnly "com.google.code.findbugs:jsr305:3.0.2"
+  runtimeOnly "com.google.guava:guava:${versions.guava}"
+  implementation "com.google.errorprone:error_prone_annotations:2.24.1"
+  implementation "com.google.guava:failureaccess:1.0.1"
+  implementation "io.grpc:grpc-api:${versions.grpc}"
+  implementation "io.grpc:grpc-core:${versions.grpc}"
+  implementation "io.grpc:grpc-netty-shaded:${versions.grpc}"
+  implementation "io.grpc:grpc-protobuf-lite:${versions.grpc}"
+  implementation "io.grpc:grpc-protobuf:${versions.grpc}"
+  implementation "io.grpc:grpc-services:${versions.grpc}"
+  implementation "io.grpc:grpc-stub:${versions.grpc}"
+  implementation "io.grpc:grpc-util:${versions.grpc}"
+  implementation "io.perfmark:perfmark-api:0.26.0"
+}
+
+tasks.named("dependencyLicenses").configure {
+  mapping from: /grpc-.*/, to: 'grpc'
+}
+
+thirdPartyAudit {
+  ignoreMissingClasses(
+    'com.aayushatharva.brotli4j.Brotli4jLoader',
+    'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status',
+    'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper',
+    'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel',
+    'com.aayushatharva.brotli4j.encoder.Encoder$Mode',
+    'com.aayushatharva.brotli4j.encoder.Encoder$Parameters',
+    // classes are missing
+
+    // from io.netty.logging.CommonsLoggerFactory (netty)
+    'org.apache.commons.logging.Log',
+    'org.apache.commons.logging.LogFactory',
+
+    // from Log4j (deliberate, Netty will fallback to Log4j 2)
+    'org.apache.log4j.Level',
+    'org.apache.log4j.Logger',
+
+    // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
+    'org.bouncycastle.cert.X509v3CertificateBuilder',
+    'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
+    'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',
+    'org.bouncycastle.openssl.PEMEncryptedKeyPair',
+    'org.bouncycastle.openssl.PEMParser',
+    'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter',
+    'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder',
+    'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder',
+    'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo',
+
+    // from io.netty.handler.ssl.JettyNpnSslEngine (netty)
+    'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
+    'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
+    'org.eclipse.jetty.npn.NextProtoNego',
+
+    // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
+    'org.jboss.marshalling.ByteInput',
+
+    // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
+    'org.jboss.marshalling.ByteOutput',
+
+    // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
+    'org.jboss.marshalling.Marshaller',
+
+    // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
+    'org.jboss.marshalling.MarshallerFactory',
+    'org.jboss.marshalling.MarshallingConfiguration',
+    'org.jboss.marshalling.Unmarshaller',
+
+    // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
+    'org.slf4j.helpers.FormattingTuple',
+    'org.slf4j.helpers.MessageFormatter',
+    'org.slf4j.Logger',
+    'org.slf4j.LoggerFactory',
+    'org.slf4j.spi.LocationAwareLogger',
+
+    'com.google.gson.stream.JsonReader',
+    'com.google.gson.stream.JsonToken',
+    'com.google.protobuf.util.Durations',
+    'com.google.protobuf.util.Timestamps',
+    'com.google.protobuf.nano.CodedOutputByteBufferNano',
+    'com.google.protobuf.nano.MessageNano',
+    'com.google.rpc.Status',
+    'com.google.rpc.Status$Builder',
+    'com.ning.compress.BufferRecycler',
+    'com.ning.compress.lzf.ChunkDecoder',
+    'com.ning.compress.lzf.ChunkEncoder',
+    'com.ning.compress.lzf.LZFChunk',
+    'com.ning.compress.lzf.LZFEncoder',
+    'com.ning.compress.lzf.util.ChunkDecoderFactory',
+    'com.ning.compress.lzf.util.ChunkEncoderFactory',
+    'lzma.sdk.lzma.Encoder',
+    'net.jpountz.lz4.LZ4Compressor',
+    'net.jpountz.lz4.LZ4Factory',
+    'net.jpountz.lz4.LZ4FastDecompressor',
+    'net.jpountz.xxhash.XXHash32',
+    'net.jpountz.xxhash.XXHashFactory',
+    'org.eclipse.jetty.alpn.ALPN$ClientProvider',
+    'org.eclipse.jetty.alpn.ALPN$ServerProvider',
+    'org.eclipse.jetty.alpn.ALPN',
+
+    'org.conscrypt.AllocatedBuffer',
+    'org.conscrypt.BufferAllocator',
+    'org.conscrypt.Conscrypt',
+    'org.conscrypt.HandshakeListener',
+
+    'reactor.blockhound.BlockHound$Builder',
+    'reactor.blockhound.integration.BlockHoundIntegration'
+  )
+
+  ignoreViolations(
+    // uses internal java api: sun.misc.Unsafe
+    'com.google.common.cache.Striped64',
+    'com.google.common.cache.Striped64$1',
+    'com.google.common.cache.Striped64$Cell',
+    'com.google.common.hash.Striped64',
+    'com.google.common.hash.Striped64$1',
+    'com.google.common.hash.Striped64$Cell',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
+    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
+    'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
+    'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
+    'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
+    'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
+
+    'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
+    'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1',
+    'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2',
+    'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3',
+    'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4',
+    'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5',
+    'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0',
+    'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$1',
+    'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$2',
+    'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$3',
+    'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$4',
+    'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$6',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess',
+    'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess'
+  )
+}
diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1
new file mode 100644
index 0000000000000..67723f6f51248
--- /dev/null
+++ b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1
@@ -0,0 +1 @@
+32b299e45105aa9b0df8279c74dc1edfcf313ff0
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1
new file mode 100644
index 0000000000000..4798b37e20691
--- /dev/null
+++ b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1
@@ -0,0 +1 @@
+1dcf1de382a0bf95a3d8b0849546c88bac1292c9
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt b/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/transport-grpc/licenses/grpc-LICENSE.txt b/plugins/transport-grpc/licenses/grpc-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/plugins/transport-grpc/licenses/grpc-NOTICE.txt b/plugins/transport-grpc/licenses/grpc-NOTICE.txt
new file mode 100644
index 0000000000000..f70c5620cf75a
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-NOTICE.txt
@@ -0,0 +1,62 @@
+Copyright 2014 The gRPC Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+-----------------------------------------------------------------------
+
+This product contains a modified portion of 'OkHttp', an open source
+HTTP & SPDY client for Android and Java applications, which can be obtained
+at:
+
+  * LICENSE:
+    * okhttp/third_party/okhttp/LICENSE (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/square/okhttp
+  * LOCATION_IN_GRPC:
+    * okhttp/third_party/okhttp
+
+This product contains a modified portion of 'Envoy', an open source
+cloud-native high-performance edge/middle/service proxy, which can be
+obtained at:
+
+  * LICENSE:
+    * xds/third_party/envoy/LICENSE (Apache License 2.0)
+  * NOTICE:
+    * xds/third_party/envoy/NOTICE
+  * HOMEPAGE:
+    * https://www.envoyproxy.io
+  * LOCATION_IN_GRPC:
+    * xds/third_party/envoy
+
+This product contains a modified portion of 'protoc-gen-validate (PGV)',
+an open source protoc plugin to generate polyglot message validators,
+which can be obtained at:
+
+  * LICENSE:
+    * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0)
+  * NOTICE:
+      * xds/third_party/protoc-gen-validate/NOTICE
+  * HOMEPAGE:
+    * https://github.com/envoyproxy/protoc-gen-validate
+  * LOCATION_IN_GRPC:
+    * xds/third_party/protoc-gen-validate
+
+This product contains a modified portion of 'udpa',
+an open source universal data plane API, which can be obtained at:
+
+  * LICENSE:
+    * xds/third_party/udpa/LICENSE (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/cncf/udpa
+  * LOCATION_IN_GRPC:
+    * xds/third_party/udpa
diff --git a/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..1844172dec982
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1
@@ -0,0 +1 @@
+a257a5dd25dda1c97a99b56d5b9c1e56c12ae554
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..e20345d29e914
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1
@@ -0,0 +1 @@
+b0fd51a1c029785d1c9ae2cfc80a296b60dfcfdb
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..53fa705a66129
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1
@@ -0,0 +1 @@
+8ea4186fbdcc5432664364ed53e03cf0d458c3ec
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..e861b41837f33
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1
@@ -0,0 +1 @@
+35b28e0d57874021cd31e76dd4a795f76a82471e
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..b2401f9752829
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1
@@ -0,0 +1 @@
+a53064b896adcfefe74362a33e111492351dfc03
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..c4edf923791e5
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1
@@ -0,0 +1 @@
+6c2a0b0640544b9010a42bcf76f2791116a75c9d
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..118464f8f48ff
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1
@@ -0,0 +1 @@
+d58ee1cf723b4b5536d44b67e328c163580a8d98
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1
new file mode 100644
index 0000000000000..c3261b012e502
--- /dev/null
+++ b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1
@@ -0,0 +1 @@
+2d195570e9256d1357d584146a8e6b19587d4044
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1
new file mode 100644
index 0000000000000..27d5304e326df
--- /dev/null
+++ b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1
@@ -0,0 +1 @@
+818e780da2c66c63bbb6480fef1f3855eeafa3e4
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/guava-LICENSE.txt b/plugins/transport-grpc/licenses/guava-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/transport-grpc/licenses/guava-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/plugins/transport-grpc/licenses/guava-NOTICE.txt b/plugins/transport-grpc/licenses/guava-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1
new file mode 100644
index 0000000000000..abf1becd13298
--- /dev/null
+++ b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1
@@ -0,0 +1 @@
+ef65452adaf20bf7d12ef55913aba24037b82738
\ No newline at end of file
diff --git a/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt
new file mode 100644
index 0000000000000..261eeb9e9f8b2
--- /dev/null
+++ b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt
new file mode 100644
index 0000000000000..7d74b6569cf64
--- /dev/null
+++ b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt
@@ -0,0 +1,40 @@
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+-----------------------------------------------------------------------
+
+This product contains a modified portion of 'Catapult', an open source
+Trace Event viewer for Chrome, Linux, and Android applications, which can
+be obtained at:
+
+  * LICENSE:
+    * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/catapult/LICENSE (New BSD License)
+  * HOMEPAGE:
+    * https://github.com/catapult-project/catapult
+
+This product contains a modified portion of 'Polymer', a library for Web
+Components, which can be obtained at:
+  * LICENSE:
+    * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/polymer/LICENSE (New BSD License)
+  * HOMEPAGE:
+    * https://github.com/Polymer/polymer
+
+
+This product contains a modified portion of 'ASM', an open source
+Java Bytecode library, which can be obtained at:
+
+  * LICENSE:
+    * agent/src/main/resources/io/perfmark/agent/third_party/asm/LICENSE (BSD style License)
+  * HOMEPAGE:
+    * https://asm.ow2.io/
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
new file mode 100644
index 0000000000000..0a464e135350b
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.grpc;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT;
+
+/**
+ * Main class for the gRPC plugin.
+ */
+public final class GrpcPlugin extends Plugin implements NetworkPlugin {
+
+    /**
+     * Creates a new GrpcPlugin instance.
+     */
+    public GrpcPlugin() {}
+
+    @Override
+    public Map<String, Supplier<AuxTransport>> getAuxTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        CircuitBreakerService circuitBreakerService,
+        NetworkService networkService,
+        ClusterSettings clusterSettings,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            GRPC_TRANSPORT_SETTING_KEY,
+            () -> new Netty4GrpcServerTransport(settings, Collections.emptyList(), networkService)
+        );
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(
+            SETTING_GRPC_PORTS,
+            SETTING_GRPC_HOST,
+            SETTING_GRPC_PUBLISH_HOST,
+            SETTING_GRPC_BIND_HOST,
+            SETTING_GRPC_WORKER_COUNT,
+            SETTING_GRPC_PUBLISH_PORT
+        );
+    }
+}
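
A minimal sketch, assuming only the classes added by this patch, of how GrpcPlugin's aux transport registration could be exercised from a test. The class name GrpcPluginUsageSketch, the empty-resolver NetworkService, and the null arguments for the unused thread pool, circuit breaker service, cluster settings, and tracer are assumptions for illustration, not part of the patch; the supplier resolves to the Netty4GrpcServerTransport constructor added in the next file.

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;

    import org.opensearch.common.network.NetworkService;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.plugins.NetworkPlugin.AuxTransport;
    import org.opensearch.transport.grpc.GrpcPlugin;
    import org.opensearch.transport.grpc.Netty4GrpcServerTransport;

    public class GrpcPluginUsageSketch {
        public static void main(String[] args) {
            GrpcPlugin plugin = new GrpcPlugin();
            Settings settings = Settings.EMPTY;
            // NetworkService with no custom name resolvers; sufficient for this sketch.
            NetworkService networkService = new NetworkService(Collections.emptyList());

            // GrpcPlugin#getAuxTransports only uses the settings and network service,
            // so the remaining parameters are passed as null purely for illustration.
            Map<String, Supplier<AuxTransport>> transports = plugin.getAuxTransports(
                settings, null, null, networkService, null, null
            );

            // The supplier is keyed by GRPC_TRANSPORT_SETTING_KEY ("experimental-transport-grpc").
            AuxTransport grpcTransport = transports
                .get(Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY)
                .get();
            System.out.println("Registered aux transport: " + grpcTransport.getClass().getSimpleName());
        }
    }
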
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
new file mode 100644
index 0000000000000..61c0722772b92
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
@@ -0,0 +1,277 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.grpc;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.PortsRange;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.transport.BindTransportException;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+
+import io.grpc.BindableService;
+import io.grpc.InsecureServerCredentials;
+import io.grpc.Server;
+import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
+import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup;
+import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.grpc.protobuf.services.HealthStatusManager;
+import io.grpc.protobuf.services.ProtoReflectionService;
+
+import static java.util.Collections.emptyList;
+import static org.opensearch.common.settings.Setting.intSetting;
+import static org.opensearch.common.settings.Setting.listSetting;
+import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory;
+import static org.opensearch.transport.Transport.resolveTransportPublishPort;
+
+/**
+ * Netty4 gRPC server implemented as a LifecycleComponent.
+ * Services are injected through the BindableService list.
+ */
+public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
+    private static final Logger logger = LogManager.getLogger(Netty4GrpcServerTransport.class);
+
+    /**
+     * Type key for configuring settings of this auxiliary transport.
+     */
+    public static final String GRPC_TRANSPORT_SETTING_KEY = "experimental-transport-grpc";
+
+    /**
+     * Port range on which to bind.
+     * Note this setting is configured through the AffixSetting AUX_TRANSPORT_PORTS, where the aux transport type matches GRPC_TRANSPORT_SETTING_KEY.
+     */
+    public static final Setting<PortsRange> SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(
+        GRPC_TRANSPORT_SETTING_KEY
+    );
+
+    /**
+     * Port published to peers for this server.
+     */
+    public static final Setting<Integer> SETTING_GRPC_PUBLISH_PORT = intSetting("grpc.publish_port", -1, -1, Setting.Property.NodeScope);
+
+    /**
+     * Host list to bind and publish.
+     * For distinct bind and publish hosts, configure SETTING_GRPC_BIND_HOST and SETTING_GRPC_PUBLISH_HOST separately.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_HOST = listSetting(
+        "grpc.host",
+        emptyList(),
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Host list to bind.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_BIND_HOST = listSetting(
+        "grpc.bind_host",
+        SETTING_GRPC_HOST,
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Host list published to peers.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_PUBLISH_HOST = listSetting(
+        "grpc.publish_host",
+        SETTING_GRPC_HOST,
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Configures the size of the thread pool backing this transport server.
+     */
+    public static final Setting<Integer> SETTING_GRPC_WORKER_COUNT = new Setting<>(
+        "grpc.netty.worker_count",
+        (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)),
+        (s) -> Setting.parseInt(s, 1, "grpc.netty.worker_count"),
+        Setting.Property.NodeScope
+    );
+
+    private final Settings settings;
+    private final NetworkService networkService;
+    private final List<BindableService> services;
+    private final CopyOnWriteArrayList<Server> servers = new CopyOnWriteArrayList<>();
+    private final String[] bindHosts;
+    private final String[] publishHosts;
+    private final PortsRange port;
+    private final int nettyEventLoopThreads;
+
+    private volatile BoundTransportAddress boundAddress;
+    private volatile EventLoopGroup eventLoopGroup;
+
+    /**
+     * Creates a new Netty4GrpcServerTransport instance.
+     * @param settings the configured settings.
+     * @param services the gRPC compatible services to be registered with the server.
+     * @param networkService the bind/publish addresses.
+     */
+    public Netty4GrpcServerTransport(Settings settings, List<BindableService> services, NetworkService networkService) {
+        this.settings = Objects.requireNonNull(settings);
+        this.services = Objects.requireNonNull(services);
+        this.networkService = Objects.requireNonNull(networkService);
+
+        final List<String> grpcBindHost = SETTING_GRPC_BIND_HOST.get(settings);
+        this.bindHosts = (grpcBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : grpcBindHost).toArray(
+            Strings.EMPTY_ARRAY
+        );
+
+        final List<String> grpcPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
+        this.publishHosts = (grpcPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : grpcPublishHost)
+            .toArray(Strings.EMPTY_ARRAY);
+
+        this.port = SETTING_GRPC_PORTS.get(settings);
+        this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings);
+    }
+
+    BoundTransportAddress boundAddress() {
+        return this.boundAddress;
+    }
+
+    @Override
+    protected void doStart() {
+        boolean success = false;
+        try {
+            this.eventLoopGroup = new NioEventLoopGroup(nettyEventLoopThreads, daemonThreadFactory(settings, "grpc_event_loop"));
+            bindServer();
+            success = true;
+            logger.info("Started gRPC server on port {}", port);
+        } finally {
+            if (!success) {
+                doStop();
+            }
+        }
+    }
+
+    @Override
+    protected void doStop() {
+        for (Server server : servers) {
+            if (server != null) {
+                server.shutdown();
+                try {
+                    server.awaitTermination(30, TimeUnit.SECONDS);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    logger.warn("Interrupted while shutting down gRPC server");
+                } finally {
+                    server.shutdownNow();
+                }
+            }
+        }
+        if (eventLoopGroup != null) {
+            try {
+                eventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).await();
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                logger.warn("Failed to shut down event loop group");
+            }
+        }
+    }
+
+    @Override
+    protected void doClose() {
+
+    }
+
+    private void bindServer() {
+        InetAddress[] hostAddresses;
+        try {
+            hostAddresses = networkService.resolveBindHostAddresses(bindHosts);
+        } catch (IOException e) {
+            throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e);
+        }
+
+        List<TransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length);
+        for (InetAddress address : hostAddresses) {
+            boundAddresses.add(bindAddress(address, port));
+        }
+
+        final InetAddress publishInetAddress;
+        try {
+            publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts);
+        } catch (Exception e) {
+            throw new BindTransportException("Failed to resolve publish address", e);
+        }
+
+        final int publishPort = resolveTransportPublishPort(SETTING_GRPC_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress);
+        if (publishPort < 0) {
+            throw new BindTransportException(
+                "Failed to auto-resolve grpc publish port, multiple bound addresses "
+                    + boundAddresses
+                    + " with distinct ports and none of them matched the publish address ("
+                    + publishInetAddress
+                    + "). "
+                    + "Please specify a unique port by setting "
+                    + SETTING_GRPC_PORTS.getKey()
+                    + " or "
+                    + SETTING_GRPC_PUBLISH_PORT.getKey()
+            );
+        }
+
+        TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
+        this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress);
+        logger.info("{}", boundAddress);
+    }
+
+    private TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRange) {
+        AtomicReference<Exception> lastException = new AtomicReference<>();
+        AtomicReference<TransportAddress> addr = new AtomicReference<>();
+
+        boolean success = portRange.iterate(portNumber -> {
+            try {
+
+                final InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber);
+                final NettyServerBuilder serverBuilder = NettyServerBuilder.forAddress(address, InsecureServerCredentials.create())
+                    .bossEventLoopGroup(eventLoopGroup)
+                    .workerEventLoopGroup(eventLoopGroup)
+                    .channelType(NioServerSocketChannel.class)
+                    .addService(new HealthStatusManager().getHealthService())
+                    .addService(ProtoReflectionService.newInstance());
+
+                services.forEach(serverBuilder::addService);
+
+                Server srv = serverBuilder.build().start();
+                servers.add(srv);
+                addr.set(new TransportAddress(hostAddress, portNumber));
+                logger.debug("Bound gRPC to address {{}}", address);
+                return true;
+            } catch (Exception e) {
+                lastException.set(e);
+                return false;
+            }
+        });
+
+        if (!success) {
+            throw new RuntimeException("Failed to bind to " + hostAddress + " on ports " + portRange, lastException.get());
+        }
+
+        return addr.get();
+    }
+}
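
A rough usage sketch of the transport above, mirroring what the unit test later in this patch does; the host, port, and worker values are illustrative:

    Settings grpcSettings = Settings.builder()
        .putList("grpc.host", "0.0.0.0")                                         // SETTING_GRPC_HOST
        .put("grpc.publish_port", 9400)                                          // SETTING_GRPC_PUBLISH_PORT
        .put("grpc.netty.worker_count", 2)                                       // SETTING_GRPC_WORKER_COUNT
        .put("aux.transport.experimental-transport-grpc.ports", "9400-9500")     // SETTING_GRPC_PORTS
        .build();
    Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(grpcSettings, List.of(), new NetworkService(List.of()));
    transport.start();                                          // binds the Netty event loop and gRPC server
    BoundTransportAddress bound = transport.boundAddress();     // bound addresses plus resolved publish address
    transport.stop();                                           // shuts servers and the event loop group down
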
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
new file mode 100644
index 0000000000000..4a5d9d02b5b91
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * gRPC transport implementation for OpenSearch.
+ * Provides network communication using the gRPC protocol.
+ */
+package org.opensearch.transport.grpc;
diff --git a/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..398de576b6c5a
--- /dev/null
+++ b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant codeBase "${codebase.grpc-netty-shaded}" {
+   // for reading the system-wide configuration for the backlog of established sockets
+   permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read";
+
+   // netty makes and accepts socket connections
+   permission java.net.SocketPermission "*", "accept,connect";
+
+   // Netty sets custom classloader for some of its internal threads
+   permission java.lang.RuntimePermission "*", "setContextClassLoader";
+};
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
new file mode 100644
index 0000000000000..ebeff62c2c23c
--- /dev/null
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.grpc;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Before;
+
+import java.util.List;
+
+import io.grpc.BindableService;
+
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.not;
+
+public class Netty4GrpcServerTransportTests extends OpenSearchTestCase {
+
+    private NetworkService networkService;
+    private List<BindableService> services;
+
+    @Before
+    public void setup() {
+        networkService = new NetworkService(List.of());
+        services = List.of();
+    }
+
+    public void test() {
+        try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) {
+            transport.start();
+
+            MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray()));
+            assertNotNull(transport.boundAddress().publishAddress().address());
+
+            transport.stop();
+        }
+    }
+
+    private static Settings createSettings() {
+        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java
index 53b1d990f9a0c..9f1dcbe8fb587 100644
--- a/server/src/main/java/org/opensearch/bootstrap/Security.java
+++ b/server/src/main/java/org/opensearch/bootstrap/Security.java
@@ -35,7 +35,9 @@
 import org.opensearch.cli.Command;
 import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.io.PathUtils;
+import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.PortsRange;
 import org.opensearch.env.Environment;
 import org.opensearch.http.HttpTransportSettings;
 import org.opensearch.plugins.PluginInfo;
@@ -71,6 +73,9 @@
 
 import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath;
 import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING;
 
 /**
  * Initializes SecurityManager with necessary permissions.
@@ -402,6 +407,7 @@ static void addFilePermissions(Permissions policy, Environment environment) thro
     private static void addBindPermissions(Permissions policy, Settings settings) {
         addSocketPermissionForHttp(policy, settings);
         addSocketPermissionForTransportProfiles(policy, settings);
+        addSocketPermissionForAux(policy, settings);
     }
 
     /**
@@ -416,6 +422,29 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S
         addSocketPermissionForPortRange(policy, httpRange);
     }
 
+    /**
+     * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORTS.
+     * If an auxiliary transport type is enabled but has no corresponding port range setting, fall back to AUX_PORT_DEFAULTS.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
+     * @param settings the {@link Settings} instance to read the auxiliary transport settings from
+     */
+    private static void addSocketPermissionForAux(final Permissions policy, final Settings settings) {
+        Set<PortsRange> portsRanges = new HashSet<>();
+        for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) {
+            Setting<PortsRange> auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType);
+            if (auxTypePortSettings.exists(settings)) {
+                portsRanges.add(auxTypePortSettings.get(settings));
+            } else {
+                portsRanges.add(new PortsRange(AUX_PORT_DEFAULTS));
+            }
+        }
+
+        for (PortsRange portRange : portsRanges) {
+            addSocketPermissionForPortRange(policy, portRange.getPortRangeString());
+        }
+    }
+
     /**
      * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in
      * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}.
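
A small sketch of the fallback described above; the aux transport key "my-aux" is hypothetical and only illustrates how an enabled type without an explicit ports setting falls back to AUX_PORT_DEFAULTS:

    Settings settings = Settings.builder()
        .putList("aux.transport.types", "my-aux")     // enabled, but no "aux.transport.my-aux.ports" configured
        .build();
    Setting<PortsRange> portSetting = NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace("my-aux");
    PortsRange range = portSetting.exists(settings)
        ? portSetting.get(settings)
        : new PortsRange(NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS);          // "9400-9500"
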
diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java
index bb8da190a6f35..5d55fb52c323d 100644
--- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java
+++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java
@@ -80,6 +80,9 @@
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING;
+
 /**
  * A module to handle registering and binding all network related classes.
  *
@@ -157,6 +160,8 @@ public final class NetworkModule {
 
     private final Map<String, Supplier<Transport>> transportFactories = new HashMap<>();
     private final Map<String, Supplier<HttpServerTransport>> transportHttpFactories = new HashMap<>();
+    private final Map<String, Supplier<NetworkPlugin.AuxTransport>> transportAuxFactories = new HashMap<>();
+
     private final List<TransportInterceptor> transportInterceptors = new ArrayList<>();
 
     /**
@@ -222,6 +227,18 @@ public NetworkModule(
                 registerHttpTransport(entry.getKey(), entry.getValue());
             }
 
+            Map<String, Supplier<NetworkPlugin.AuxTransport>> auxTransportFactory = plugin.getAuxTransports(
+                settings,
+                threadPool,
+                circuitBreakerService,
+                networkService,
+                clusterSettings,
+                tracer
+            );
+            for (Map.Entry<String, Supplier<NetworkPlugin.AuxTransport>> entry : auxTransportFactory.entrySet()) {
+                registerAuxTransport(entry.getKey(), entry.getValue());
+            }
+
             Map<String, Supplier<Transport>> transportFactory = plugin.getTransports(
                 settings,
                 threadPool,
@@ -305,6 +322,12 @@ private void registerHttpTransport(String key, Supplier<HttpServerTransport> fac
         }
     }
 
+    private void registerAuxTransport(String key, Supplier<NetworkPlugin.AuxTransport> factory) {
+        if (transportAuxFactories.putIfAbsent(key, factory) != null) {
+            throw new IllegalArgumentException("transport for name: " + key + " is already registered");
+        }
+    }
+
     /**
      * Register an allocation command.
      * <p>
@@ -346,6 +369,25 @@ public Supplier<HttpServerTransport> getHttpServerTransportSupplier() {
         return factory;
     }
 
+    /**
+     * Optional client/server transports that run in parallel to HttpServerTransport.
+     * Multiple transport types can be registered and enabled via AUX_TRANSPORT_TYPES_SETTING.
+     * An IllegalStateException is thrown if a transport type is enabled but not registered.
+     */
+    public List<NetworkPlugin.AuxTransport> getAuxServerTransportList() {
+        List<NetworkPlugin.AuxTransport> serverTransportSuppliers = new ArrayList<>();
+
+        for (String transportType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) {
+            final Supplier<NetworkPlugin.AuxTransport> factory = transportAuxFactories.get(transportType);
+            if (factory == null) {
+                throw new IllegalStateException("Unsupported " + AUX_TRANSPORT_TYPES_KEY + " [" + transportType + "]");
+            }
+            serverTransportSuppliers.add(factory.get());
+        }
+
+        return serverTransportSuppliers;
+    }
+
     public Supplier<Transport> getTransportSupplier() {
         final String name;
         if (TRANSPORT_TYPE_SETTING.exists(settings)) {
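
A sketch (not part of the patch) of how the new accessor behaves, assuming the transport-grpc plugin from this change is installed:

    // With "aux.transport.types: experimental-transport-grpc" and the transport-grpc plugin loaded,
    // this returns the instantiated Netty4GrpcServerTransport for Node to lifecycle-manage; an enabled
    // but unregistered type results in an IllegalStateException instead.
    List<NetworkPlugin.AuxTransport> auxTransports = networkModule.getAuxServerTransportList();
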
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index 007a68b8eaa29..c27efa080ac4e 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -149,6 +149,7 @@
 import org.opensearch.node.resource.tracker.ResourceTrackerSettings;
 import org.opensearch.persistent.PersistentTasksClusterService;
 import org.opensearch.persistent.decider.EnableAssignmentDecider;
+import org.opensearch.plugins.NetworkPlugin;
 import org.opensearch.plugins.PluginsService;
 import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings;
 import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings;
@@ -362,6 +363,7 @@ public void apply(Settings value, Settings current, Settings previous) {
                 NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED,
                 NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION,
                 NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME,
+                NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING,
                 HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
                 HttpTransportSettings.SETTING_CORS_ENABLED,
                 HttpTransportSettings.SETTING_CORS_MAX_AGE,
diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java
index 991fbf12072be..7f78ae0b9d2ff 100644
--- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java
+++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java
@@ -62,6 +62,7 @@
 import org.opensearch.telemetry.tracing.channels.TraceableRestChannel;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.BindTransportException;
+import org.opensearch.transport.Transport;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -71,7 +72,6 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -192,7 +192,25 @@ protected void bindServer() {
             throw new BindTransportException("Failed to resolve publish address", e);
         }
 
-        final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress);
+        final int publishPort = Transport.resolveTransportPublishPort(
+            SETTING_HTTP_PUBLISH_PORT.get(settings),
+            boundAddresses,
+            publishInetAddress
+        );
+        if (publishPort < 0) {
+            throw new BindHttpException(
+                "Failed to auto-resolve http publish port, multiple bound addresses "
+                    + boundAddresses
+                    + " with distinct ports and none of them matched the publish address ("
+                    + publishInetAddress
+                    + "). "
+                    + "Please specify a unique port by setting "
+                    + SETTING_HTTP_PORT.getKey()
+                    + " or "
+                    + SETTING_HTTP_PUBLISH_PORT.getKey()
+            );
+        }
+
         TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
         this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress);
         logger.info("{}", boundAddress);
@@ -258,47 +276,6 @@ protected void doClose() {}
      */
     protected abstract void stopInternal();
 
-    // package private for tests
-    static int resolvePublishPort(Settings settings, List<TransportAddress> boundAddresses, InetAddress publishInetAddress) {
-        int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
-
-        if (publishPort < 0) {
-            for (TransportAddress boundAddress : boundAddresses) {
-                InetAddress boundInetAddress = boundAddress.address().getAddress();
-                if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
-                    publishPort = boundAddress.getPort();
-                    break;
-                }
-            }
-        }
-
-        // if no matching boundAddress found, check if there is a unique port for all bound addresses
-        if (publishPort < 0) {
-            final Set<Integer> ports = new HashSet<>();
-            for (TransportAddress boundAddress : boundAddresses) {
-                ports.add(boundAddress.getPort());
-            }
-            if (ports.size() == 1) {
-                publishPort = ports.iterator().next();
-            }
-        }
-
-        if (publishPort < 0) {
-            throw new BindHttpException(
-                "Failed to auto-resolve http publish port, multiple bound addresses "
-                    + boundAddresses
-                    + " with distinct ports and none of them matched the publish address ("
-                    + publishInetAddress
-                    + "). "
-                    + "Please specify a unique port by setting "
-                    + SETTING_HTTP_PORT.getKey()
-                    + " or "
-                    + SETTING_HTTP_PUBLISH_PORT.getKey()
-            );
-        }
-        return publishPort;
-    }
-
     public void onException(HttpChannel channel, Exception e) {
         channel.handleException(e);
         if (lifecycle.started() == false) {
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index c78ee6711dcda..704a23890b07a 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -1216,6 +1216,9 @@ protected Node(
                 SearchExecutionStatsCollector.makeWrapper(responseCollectorService)
             );
             final HttpServerTransport httpServerTransport = newHttpTransport(networkModule);
+
+            pluginComponents.addAll(newAuxTransports(networkModule));
+
             final IndexingPressureService indexingPressureService = new IndexingPressureService(settings, clusterService);
             // Going forward, IndexingPressureService will have required constructs for exposing listeners/interfaces for plugin
             // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478).
@@ -2113,6 +2116,10 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) {
         return networkModule.getHttpServerTransportSupplier().get();
     }
 
+    protected List<NetworkPlugin.AuxTransport> newAuxTransports(NetworkModule networkModule) {
+        return networkModule.getAuxServerTransportList();
+    }
+
     private static class LocalNodeFactory implements Function<BoundTransportAddress, DiscoveryNode> {
         private final SetOnce<DiscoveryNode> localNode = new SetOnce<>();
         private final String persistentNodeId;
diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
index 138ef6f71280d..516aa94534f94 100644
--- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
@@ -31,9 +31,13 @@
 
 package org.opensearch.plugins;
 
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.PortsRange;
 import org.opensearch.common.util.BigArrays;
 import org.opensearch.common.util.PageCacheRecycler;
 import org.opensearch.common.util.concurrent.ThreadContext;
@@ -49,8 +53,12 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Function;
 import java.util.function.Supplier;
 
+import static java.util.Collections.emptyList;
+import static org.opensearch.common.settings.Setting.affixKeySetting;
+
 /**
  * Plugin for extending network and transport related classes
  *
@@ -58,6 +66,49 @@
  */
 public interface NetworkPlugin {
 
+    /**
+     * Auxiliary transports are lifecycle components with an associated port range.
+     * These pluggable client/server transport implementations have their lifecycle managed by Node.
+     *
+     * Auxiliary transports are additionally defined by a port range on which they bind. Opening permissions on these
+     * ports is awkward as {@link org.opensearch.bootstrap.Security} is configured prior to Node initialization during
+     * bootstrap. To allow pluggable AuxTransports access to configurable port ranges, we require the port range to be provided
+     * through an {@link org.opensearch.common.settings.Setting.AffixSetting} of the form 'AUX_SETTINGS_PREFIX.{aux-transport-key}.ports'.
+     */
+    abstract class AuxTransport extends AbstractLifecycleComponent {
+        public static final String AUX_SETTINGS_PREFIX = "aux.transport.";
+        public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types";
+        public static final String AUX_PORT_DEFAULTS = "9400-9500";
+        public static final Setting.AffixSetting<PortsRange> AUX_TRANSPORT_PORTS = affixKeySetting(
+            AUX_SETTINGS_PREFIX,
+            "ports",
+            key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope)
+        );
+
+        public static final Setting<List<String>> AUX_TRANSPORT_TYPES_SETTING = Setting.listSetting(
+            AUX_TRANSPORT_TYPES_KEY,
+            emptyList(),
+            Function.identity(),
+            Setting.Property.NodeScope
+        );
+    }
+
+    /**
+     * Auxiliary transports are optional and run in parallel to the default HttpServerTransport.
+     * Returns a map of AuxTransport suppliers.
+     */
+    @ExperimentalApi
+    default Map<String, Supplier<AuxTransport>> getAuxTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        CircuitBreakerService circuitBreakerService,
+        NetworkService networkService,
+        ClusterSettings clusterSettings,
+        Tracer tracer
+    ) {
+        return Collections.emptyMap();
+    }
+
     /**
      * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing
      * transport (inter-node) requests. This must not return <code>null</code>
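
A sketch of a third-party plugin wiring its own auxiliary transport through this hook; MyAuxPlugin, MyAuxTransport, and the "my-aux" key are hypothetical names, and the transport's port range would then be read from "aux.transport.my-aux.ports":

    public class MyAuxPlugin extends Plugin implements NetworkPlugin {
        @Override
        public Map<String, Supplier<AuxTransport>> getAuxTransports(
            Settings settings,
            ThreadPool threadPool,
            CircuitBreakerService circuitBreakerService,
            NetworkService networkService,
            ClusterSettings clusterSettings,
            Tracer tracer
        ) {
            // Register the hypothetical transport under its aux type key.
            return Collections.singletonMap("my-aux", () -> new MyAuxTransport(settings, networkService));
        }
    }
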
diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java
index f56cd146ce953..f80a29872a78d 100644
--- a/server/src/main/java/org/opensearch/transport/TcpTransport.java
+++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java
@@ -521,38 +521,8 @@ private BoundTransportAddress createBoundTransportAddress(ProfileSettings profil
             throw new BindTransportException("Failed to resolve publish address", e);
         }
 
-        final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress);
-        final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
-        return new BoundTransportAddress(transportBoundAddresses, publishAddress);
-    }
-
-    // package private for tests
-    static int resolvePublishPort(ProfileSettings profileSettings, List<InetSocketAddress> boundAddresses, InetAddress publishInetAddress) {
-        int publishPort = profileSettings.publishPort;
-
-        // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress
-        if (publishPort < 0) {
-            for (InetSocketAddress boundAddress : boundAddresses) {
-                InetAddress boundInetAddress = boundAddress.getAddress();
-                if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
-                    publishPort = boundAddress.getPort();
-                    break;
-                }
-            }
-        }
-
-        // if no matching boundAddress found, check if there is a unique port for all bound addresses
-        if (publishPort < 0) {
-            final Set<Integer> ports = new HashSet<>();
-            for (InetSocketAddress boundAddress : boundAddresses) {
-                ports.add(boundAddress.getPort());
-            }
-            if (ports.size() == 1) {
-                publishPort = ports.iterator().next();
-            }
-        }
-
-        if (publishPort < 0) {
+        final int publishPort = Transport.resolvePublishPort(profileSettings.publishPort, boundAddresses, publishInetAddress);
+        if (publishPort == -1) {
             String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName;
             throw new BindTransportException(
                 "Failed to auto-resolve publish port"
@@ -568,7 +538,9 @@ static int resolvePublishPort(ProfileSettings profileSettings, List<InetSocketAd
                     + TransportSettings.PUBLISH_PORT.getKey()
             );
         }
-        return publishPort;
+
+        final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
+        return new BoundTransportAddress(transportBoundAddresses, publishAddress);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java
index b89393615c95f..bb0ec22154a81 100644
--- a/server/src/main/java/org/opensearch/transport/Transport.java
+++ b/server/src/main/java/org/opensearch/transport/Transport.java
@@ -47,13 +47,18 @@
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
 /**
  * OpenSearch Transport Interface
@@ -111,6 +116,51 @@ default boolean isSecure() {
 
     RequestHandlers getRequestHandlers();
 
+    /**
+     * Resolve the publish port for a server given a list of bound addresses and a publish InetAddress.
+     * The resolution strategy is as follows:
+     * If a configured port exists, resolve to that port.
+     * If a bound address matches the publishInetAddress, resolve to its port.
+     * If a bound address is a wildcard address, resolve to its port.
+     * If all bound addresses share the same port, resolve to that port.
+     *
+     * @param publishPort -1 if no configured publish port exists
+     * @param boundAddresses addresses bound by the server
+     * @param publishInetAddress address published for the server
+     * @return the resolved port, or the original (negative) publishPort if no port can be resolved
+     */
+    static int resolvePublishPort(int publishPort, List<InetSocketAddress> boundAddresses, InetAddress publishInetAddress) {
+        if (publishPort < 0) {
+            for (InetSocketAddress boundAddress : boundAddresses) {
+                InetAddress boundInetAddress = boundAddress.getAddress();
+                if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
+                    publishPort = boundAddress.getPort();
+                    break;
+                }
+            }
+        }
+
+        if (publishPort < 0) {
+            final Set<Integer> ports = new HashSet<>();
+            for (InetSocketAddress boundAddress : boundAddresses) {
+                ports.add(boundAddress.getPort());
+            }
+            if (ports.size() == 1) {
+                publishPort = ports.iterator().next();
+            }
+        }
+
+        return publishPort;
+    }
+
+    static int resolveTransportPublishPort(int publishPort, List<TransportAddress> boundAddresses, InetAddress publishInetAddress) {
+        return Transport.resolvePublishPort(
+            publishPort,
+            boundAddresses.stream().map(TransportAddress::address).collect(Collectors.toList()),
+            publishInetAddress
+        );
+    }
+
     /**
      * A unidirectional connection to a {@link DiscoveryNode}
      *
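
A worked sketch of the resolution rules documented on Transport.resolvePublishPort; the addresses and ports are illustrative:

    List<InetSocketAddress> bound = List.of(
        new InetSocketAddress("127.0.0.1", 9300),
        new InetSocketAddress("127.0.0.2", 9301)
    );
    Transport.resolvePublishPort(9400, bound, InetAddress.getByName("127.0.0.3")); // 9400: configured port wins
    Transport.resolvePublishPort(-1, bound, InetAddress.getByName("127.0.0.1"));   // 9300: matching bound address
    Transport.resolvePublishPort(-1, bound, InetAddress.getByName("127.0.0.3"));   // -1: distinct ports, no match
    // Callers such as TcpTransport and AbstractHttpServerTransport turn the -1 into a bind exception.
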
diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
index c34f13041cb11..a4295289c3109 100644
--- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
+++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
@@ -59,6 +59,7 @@
 import org.opensearch.test.rest.FakeRestRequest;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.Transport;
 import org.junit.After;
 import org.junit.Before;
 
@@ -70,8 +71,6 @@
 
 import static java.net.InetAddress.getByName;
 import static java.util.Arrays.asList;
-import static org.opensearch.http.AbstractHttpServerTransport.resolvePublishPort;
-import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
 public class AbstractHttpServerTransportTests extends OpenSearchTestCase {
@@ -101,47 +100,40 @@ public void testHttpPublishPort() throws Exception {
         int boundPort = randomIntBetween(9000, 9100);
         int otherBoundPort = randomIntBetween(9200, 9300);
 
-        int publishPort = resolvePublishPort(
-            Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(),
-            randomAddresses(),
-            getByName("127.0.0.2")
-        );
+        int publishPort = Transport.resolveTransportPublishPort(9080, randomAddresses(), getByName("127.0.0.2"));
         assertThat("Publish port should be explicitly set to 9080", publishPort, equalTo(9080));
 
-        publishPort = resolvePublishPort(
-            Settings.EMPTY,
+        publishPort = Transport.resolveTransportPublishPort(
+            -1,
             asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
             getByName("127.0.0.1")
         );
         assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort));
 
-        publishPort = resolvePublishPort(
-            Settings.EMPTY,
+        publishPort = Transport.resolveTransportPublishPort(
+            -1,
             asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)),
             getByName("127.0.0.3")
         );
         assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort));
 
-        final BindHttpException e = expectThrows(
-            BindHttpException.class,
-            () -> resolvePublishPort(
-                Settings.EMPTY,
-                asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
-                getByName("127.0.0.3")
-            )
+        publishPort = Transport.resolveTransportPublishPort(
+            -1,
+            asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
+            getByName("127.0.0.3")
         );
-        assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port"));
+        assertThat(publishPort, equalTo(-1));
 
-        publishPort = resolvePublishPort(
-            Settings.EMPTY,
+        publishPort = Transport.resolveTransportPublishPort(
+            -1,
             asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
             getByName("127.0.0.1")
         );
         assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
 
         if (NetworkUtils.SUPPORTS_V6) {
-            publishPort = resolvePublishPort(
-                Settings.EMPTY,
+            publishPort = Transport.resolveTransportPublishPort(
+                -1,
                 asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
                 getByName("::1")
             );
diff --git a/server/src/test/java/org/opensearch/transport/PublishPortTests.java b/server/src/test/java/org/opensearch/transport/PublishPortTests.java
index 6a41409f6f181..2e5a57c4cdd60 100644
--- a/server/src/test/java/org/opensearch/transport/PublishPortTests.java
+++ b/server/src/test/java/org/opensearch/transport/PublishPortTests.java
@@ -43,8 +43,6 @@
 
 import static java.net.InetAddress.getByName;
 import static java.util.Arrays.asList;
-import static org.opensearch.transport.TcpTransport.resolvePublishPort;
-import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
 public class PublishPortTests extends OpenSearchTestCase {
@@ -73,48 +71,44 @@ public void testPublishPort() throws Exception {
 
         }
 
-        int publishPort = resolvePublishPort(
-            new TcpTransport.ProfileSettings(settings, profile),
+        int publishPort = Transport.resolvePublishPort(
+            new TcpTransport.ProfileSettings(settings, profile).publishPort,
             randomAddresses(),
             getByName("127.0.0.2")
         );
         assertThat("Publish port should be explicitly set", publishPort, equalTo(useProfile ? 9080 : 9081));
 
-        publishPort = resolvePublishPort(
-            new TcpTransport.ProfileSettings(baseSettings, profile),
+        publishPort = Transport.resolvePublishPort(
+            new TcpTransport.ProfileSettings(baseSettings, profile).publishPort,
             asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
             getByName("127.0.0.1")
         );
         assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort));
 
-        publishPort = resolvePublishPort(
-            new TcpTransport.ProfileSettings(baseSettings, profile),
+        publishPort = Transport.resolvePublishPort(
+            new TcpTransport.ProfileSettings(baseSettings, profile).publishPort,
             asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)),
             getByName("127.0.0.3")
         );
         assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort));
 
-        try {
-            resolvePublishPort(
-                new TcpTransport.ProfileSettings(baseSettings, profile),
-                asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
-                getByName("127.0.0.3")
-            );
-            fail("Expected BindTransportException as publish_port not specified and non-unique port of bound addresses");
-        } catch (BindTransportException e) {
-            assertThat(e.getMessage(), containsString("Failed to auto-resolve publish port"));
-        }
+        int resPort = Transport.resolvePublishPort(
+            new TcpTransport.ProfileSettings(baseSettings, profile).publishPort,
+            asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)),
+            getByName("127.0.0.3")
+        );
+        assertThat("as publish_port not specified and non-unique port of bound addresses", resPort, equalTo(-1));
 
-        publishPort = resolvePublishPort(
-            new TcpTransport.ProfileSettings(baseSettings, profile),
+        publishPort = Transport.resolvePublishPort(
+            new TcpTransport.ProfileSettings(baseSettings, profile).publishPort,
             asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
             getByName("127.0.0.1")
         );
         assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort));
 
         if (NetworkUtils.SUPPORTS_V6) {
-            publishPort = resolvePublishPort(
-                new TcpTransport.ProfileSettings(baseSettings, profile),
+            publishPort = Transport.resolvePublishPort(
+                new TcpTransport.ProfileSettings(baseSettings, profile).publishPort,
                 asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)),
                 getByName("::1")
             );

From ab546ae7989b522f96020c75334014495b5149ab Mon Sep 17 00:00:00 2001
From: gaobinlong <gbinlong@amazon.com>
Date: Thu, 2 Jan 2025 23:24:08 +0800
Subject: [PATCH 15/61] Update script supports java.lang.String.sha1() and
 java.lang.String.sha256() methods (#16923)

* Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods

Signed-off-by: Gao Binlong <gbinlong@amazon.com>

* Modify change log

Signed-off-by: Gao Binlong <gbinlong@amazon.com>

---------

Signed-off-by: Gao Binlong <gbinlong@amazon.com>
---
 CHANGELOG.md                                  |  1 +
 .../painless/PainlessModulePlugin.java        |  6 +++
 .../painless/spi/org.opensearch.update.txt    | 14 +++++++
 .../rest-api-spec/test/painless/15_update.yml | 36 ++++++++++++++++++
 .../test/reindex/85_scripting.yml             | 38 +++++++++++++++++++
 .../test/update_by_query/80_scripting.yml     | 35 +++++++++++++++++
 6 files changed, 130 insertions(+)
 create mode 100644 modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a7f99722dd584..45bc56b505eb3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271))
 - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)).
 - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/))
+- Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods ([#16923](https://github.com/opensearch-project/OpenSearch/pull/16923))
 - Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)).
 - Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678))
 - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/))
diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java
index 55dc23f665d2e..b3f6f7d0730fd 100644
--- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java
+++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java
@@ -66,6 +66,7 @@
 import org.opensearch.script.ScriptContext;
 import org.opensearch.script.ScriptEngine;
 import org.opensearch.script.ScriptService;
+import org.opensearch.script.UpdateScript;
 import org.opensearch.search.aggregations.pipeline.MovingFunctionScript;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.watcher.ResourceWatcherService;
@@ -109,6 +110,11 @@ public final class PainlessModulePlugin extends Plugin implements ScriptPlugin,
         ingest.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.ingest.txt"));
         map.put(IngestScript.CONTEXT, ingest);
 
+        // Functions available to update scripts
+        List<Allowlist> update = new ArrayList<>(Allowlist.BASE_ALLOWLISTS);
+        update.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.update.txt"));
+        map.put(UpdateScript.CONTEXT, update);
+
         // Functions available to derived fields
         List<Allowlist> derived = new ArrayList<>(Allowlist.BASE_ALLOWLISTS);
         derived.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.derived.txt"));
diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt
new file mode 100644
index 0000000000000..144614b3862b0
--- /dev/null
+++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt
@@ -0,0 +1,14 @@
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# The OpenSearch Contributors require contributions made to
+# this file be licensed under the Apache-2.0 license or a
+# compatible open source license.
+#
+
+# This file contains an allowlist for the update scripts
+
+class java.lang.String {
+  String org.opensearch.painless.api.Augmentation sha1()
+  String org.opensearch.painless.api.Augmentation sha256()
+}
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml
index cb118ed9d562f..e0f3068810ed8 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml
@@ -123,3 +123,39 @@
   - match: { error.root_cause.0.type: "illegal_argument_exception" }
   - match: { error.type: "illegal_argument_exception" }
   - match: { error.reason: "Iterable object is self-referencing itself" }
+
+# update script supports java.lang.String.sha1() and java.lang.String.sha256() methods
+# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423
+---
+"Update script supports sha1() and sha256() method for strings":
+  - skip:
+      version: " - 2.18.99"
+      reason: "introduced in 2.19.0"
+  - do:
+      index:
+        index:  test_1
+        id:     1
+        body:
+          foo:    bar
+
+  - do:
+      update:
+        index:  test_1
+        id:     1
+        body:
+          script:
+            lang:   painless
+            source: "ctx._source.foo_sha1 = ctx._source.foo.sha1();ctx._source.foo_sha256 = ctx._source.foo.sha256();"
+
+  - match: { _index:   test_1 }
+  - match: { _id:      "1"    }
+  - match: { _version: 2      }
+
+  - do:
+      get:
+        index:  test_1
+        id:     1
+
+  - match: { _source.foo:        bar }
+  - match: { _source.foo_sha1:      "62cdb7020ff920e5aa642c3d4066950dd1f01f4d"   }
+  - match: { _source.foo_sha256:      "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"   }
diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml
index 9c38b13bb1ff0..5c218aa00ca4f 100644
--- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml
+++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml
@@ -440,3 +440,41 @@
             lang: painless
             source: syntax errors are fun!
   - match: {error.reason: 'compile error'}
+
+# script in reindex supports java.lang.String.sha1() and java.lang.String.sha256() methods
+# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423
+---
+"Script supports sha1() and sha256() method for strings":
+  - skip:
+      version: " - 2.18.99"
+      reason: "introduced in 2.19.0"
+  - do:
+      index:
+        index:  twitter
+        id:     1
+        body:   { "user": "foobar" }
+  - do:
+      indices.refresh: {}
+
+  - do:
+      reindex:
+        refresh: true
+        body:
+          source:
+            index: twitter
+          dest:
+            index: new_twitter
+          script:
+            lang: painless
+            source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256()
+  - match: {created: 1}
+  - match: {noops: 0}
+
+  - do:
+      get:
+        index:  new_twitter
+        id:     1
+
+  - match: { _source.user:        foobar }
+  - match: { _source.user_sha1:      "8843d7f92416211de9ebb963ff4ce28125932878"   }
+  - match: { _source.user_sha256:      "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2"   }
diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml
index a8de49d812677..b52b1428e08bb 100644
--- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml
+++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml
@@ -432,3 +432,38 @@
             lang: painless
             source: syntax errors are fun!
   - match: {error.reason: 'compile error'}
+
+# script in update_by_query supports java.lang.String.sha1() and java.lang.String.sha256() methods
+# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423
+---
+"Script supports sha1() and sha256() method for strings":
+  - skip:
+      version: " - 2.18.99"
+      reason: "introduced in 2.19.0"
+  - do:
+      index:
+        index:  twitter
+        id:     1
+        body:   { "user": "foobar" }
+  - do:
+      indices.refresh: {}
+
+  - do:
+      update_by_query:
+        index:   twitter
+        refresh: true
+        body:
+          script:
+            lang: painless
+            source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256()
+  - match: {updated: 1}
+  - match: {noops: 0}
+
+  - do:
+      get:
+        index:  twitter
+        id:     1
+
+  - match: { _source.user:        foobar }
+  - match: { _source.user_sha1:      "8843d7f92416211de9ebb963ff4ce28125932878"   }
+  - match: { _source.user_sha256:      "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2"   }

From 542b5510cdba4ec23ed2d8c96f089f53007edb05 Mon Sep 17 00:00:00 2001
From: Prudhvi Godithi <pgodithi@amazon.com>
Date: Thu, 2 Jan 2025 12:11:46 -0800
Subject: [PATCH 16/61] Workflow benchmark-pull-request.yml fix (#16925)

Signed-off-by: Prudhvi Godithi <pgodithi@amazon.com>
---
 .github/workflows/benchmark-pull-request.yml | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml
index c494df6e27ce3..e6ccc31160bf9 100644
--- a/.github/workflows/benchmark-pull-request.yml
+++ b/.github/workflows/benchmark-pull-request.yml
@@ -4,7 +4,10 @@ on:
     types: [created]
 jobs:
   run-performance-benchmark-on-pull-request:
-    if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '"run-benchmark-test"')) }}
+    if: |
+      github.repository == 'opensearch-project/OpenSearch' &&
+      github.event.issue.pull_request &&
+      contains(github.event.comment.body, '"run-benchmark-test"')
     runs-on: ubuntu-latest
     permissions:
       id-token: write
@@ -111,7 +114,7 @@ jobs:
         uses: actions/github-script@v7
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          result-encoding: string
+          result-encoding: json
           script: |
             // Get the collaborators - filtered to maintainer permissions
             const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', {
@@ -121,12 +124,12 @@ jobs:
               affiliation: 'all',
               per_page: 100
               });
-            return maintainersResponse.data.map(item => item.login).join(', ');
+            return maintainersResponse.data.map(item => item.login);
       - uses: trstringer/manual-approval@v1
-        if: (!contains(steps.get_approvers.outputs.result, github.event.comment.user.login))
+        if: ${{ !contains(fromJSON(steps.get_approvers.outputs.result), github.event.comment.user.login) }}
         with:
           secret: ${{ github.TOKEN }}
-          approvers: ${{ steps.get_approvers.outputs.result }}
+          approvers: ${{ join(fromJSON(steps.get_approvers.outputs.result), ', ') }}
           minimum-approvals: 1
           issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}'
           issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}"

From 1f94b34197347ef2027170aae455c62386b2a342 Mon Sep 17 00:00:00 2001
From: Rishabh Singh <rishabhksingh@gmail.com>
Date: Fri, 3 Jan 2025 13:02:37 -0800
Subject: [PATCH 17/61] Add benchmark config for lucene-10 big5 index snapshot
 (#16940)

Signed-off-by: Rishabh Singh <sngri@amazon.com>
---
 .github/benchmark-configs.json | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json
index 732f2f9b96ae3..b3590f8a2f942 100644
--- a/.github/benchmark-configs.json
+++ b/.github/benchmark-configs.json
@@ -239,5 +239,22 @@
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
     },
     "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_15": {
+    "description": "Search only test-procedure for big5, uses lucene-10 index snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "big5",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   }
 }

From 703eddab489607ee25a7db8428c076d89826b7c6 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Fri, 3 Jan 2025 18:07:04 -0500
Subject: [PATCH 18/61] Remove duplicate DCO check (#16942)

Signed-off-by: Andriy Redko <drreta@gmail.com>
---
 .github/workflows/dco.yml | 19 -------------------
 1 file changed, 19 deletions(-)
 delete mode 100644 .github/workflows/dco.yml

diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml
deleted file mode 100644
index ef842bb405d60..0000000000000
--- a/.github/workflows/dco.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Developer Certificate of Origin Check
-
-on: [pull_request]
-
-jobs:
-  dco-check:
-    runs-on: ubuntu-latest
-
-    steps:
-    - name: Get PR Commits
-      id: 'get-pr-commits'
-      uses: tim-actions/get-pr-commits@v1.3.1
-      with:
-        token: ${{ secrets.GITHUB_TOKEN }}
-    - name: DCO Check
-      uses: tim-actions/dco@v1.1.0
-      with:
-        commits: ${{ steps.get-pr-commits.outputs.commits }}
-

From 845fbfa10407f3264e6aab8812eff4ef0ad8be24 Mon Sep 17 00:00:00 2001
From: Craig Perkins <cwperx@amazon.com>
Date: Fri, 3 Jan 2025 18:11:49 -0500
Subject: [PATCH 19/61] Allow extended plugins to be optional (#16909)

* Make extended plugins optional

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Make extended plugins optional

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Load extensions for classpath plugins

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Ensure only single instance for each classpath extension

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Add test for classpath plugin extended plugin loading

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Modify test to allow optional extended plugin

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Only optional extended plugins

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Add additional warning message

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Add to CHANGELOG

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Add tag to make extended plugin optional

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Only send plugin names when serializing PluginInfo

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Keep track of optional extended plugins in separate set

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Include in ser/de of PluginInfo

Signed-off-by: Craig Perkins <cwperx@amazon.com>

* Change to 3_0_0

Signed-off-by: Craig Perkins <cwperx@amazon.com>

---------

Signed-off-by: Craig Perkins <cwperx@amazon.com>
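
As a hedged illustration of the new tag (the extending plugin's descriptor below is hypothetical;
the ";optional=true" suffix is the marker that PluginInfo parses in this patch), an extended plugin
dependency is declared optional in plugin-descriptor.properties like this:

    # plugin-descriptor.properties of the extending plugin (illustrative excerpt)
    name=my_plugin
    classname=FakePlugin
    extended.plugins=foo;optional=true

With the tag present, PluginsService logs a warning and continues loading the plugin when "foo" is
not installed, instead of throwing "Missing plugin [foo], dependency of [my_plugin]".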
---
 CHANGELOG.md                                  |  1 +
 .../org/opensearch/plugins/PluginInfo.java    | 33 +++++++++++++++++--
 .../opensearch/plugins/PluginsService.java    | 15 ++++++++-
 .../opensearch/plugins/PluginInfoTests.java   | 27 +++++++++++++++
 .../plugins/PluginsServiceTests.java          | 29 +++++++++++++++-
 5 files changed, 101 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 45bc56b505eb3..5f813fecf66cf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
 - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707))
+- Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909))
 
 ### Deprecated
 - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712))
diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
index b6030f4ded5e5..7173a653ebc9a 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
@@ -86,6 +86,8 @@ public class PluginInfo implements Writeable, ToXContentObject {
     private final String classname;
     private final String customFolderName;
     private final List<String> extendedPlugins;
+    // Optional extended plugins are the subset of extendedPlugins that were declared as optional
+    private final List<String> optionalExtendedPlugins;
     private final boolean hasNativeController;
 
     /**
@@ -149,7 +151,11 @@ public PluginInfo(
         this.javaVersion = javaVersion;
         this.classname = classname;
         this.customFolderName = customFolderName;
-        this.extendedPlugins = Collections.unmodifiableList(extendedPlugins);
+        this.extendedPlugins = extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList());
+        this.optionalExtendedPlugins = extendedPlugins.stream()
+            .filter(PluginInfo::isOptionalExtension)
+            .map(s -> s.split(";")[0])
+            .collect(Collectors.toUnmodifiableList());
         this.hasNativeController = hasNativeController;
     }
 
@@ -209,6 +215,16 @@ public PluginInfo(final StreamInput in) throws IOException {
         this.customFolderName = in.readString();
         this.extendedPlugins = in.readStringList();
         this.hasNativeController = in.readBoolean();
+        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+            this.optionalExtendedPlugins = in.readStringList();
+        } else {
+            this.optionalExtendedPlugins = new ArrayList<>();
+        }
+    }
+
+    static boolean isOptionalExtension(String extendedPlugin) {
+        String[] dependency = extendedPlugin.split(";");
+        return dependency.length > 1 && "optional=true".equals(dependency[1]);
     }
 
     @Override
@@ -234,6 +250,9 @@ This works for currently supported range notations (=,~)
         }
         out.writeStringCollection(extendedPlugins);
         out.writeBoolean(hasNativeController);
+        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+            out.writeStringCollection(optionalExtendedPlugins);
+        }
     }
 
     /**
@@ -417,8 +436,17 @@ public String getFolderName() {
      *
      * @return the names of the plugins extended
      */
+    public boolean isExtendedPluginOptional(String extendedPlugin) {
+        return optionalExtendedPlugins.contains(extendedPlugin);
+    }
+
+    /**
+     * Other plugins this plugin extends through SPI
+     *
+     * @return the names of the plugins extended
+     */
     public List<String> getExtendedPlugins() {
-        return extendedPlugins;
+        return extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList());
     }
 
     /**
@@ -493,6 +521,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             builder.field("custom_foldername", customFolderName);
             builder.field("extended_plugins", extendedPlugins);
             builder.field("has_native_controller", hasNativeController);
+            builder.field("optional_extended_plugins", optionalExtendedPlugins);
         }
         builder.endObject();
 
diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java
index f08c9c738f1b4..9bc1f1334122e 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginsService.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java
@@ -524,7 +524,13 @@ private static void addSortedBundle(
         for (String dependency : bundle.plugin.getExtendedPlugins()) {
             Bundle depBundle = bundles.get(dependency);
             if (depBundle == null) {
-                throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]");
+                if (bundle.plugin.isExtendedPluginOptional(dependency)) {
+                    logger.warn("Missing plugin [" + dependency + "], dependency of [" + name + "]");
+                    logger.warn("Some features of this plugin may not function without the dependencies being installed.\n");
+                    continue;
+                } else {
+                    throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]");
+                }
             }
             addSortedBundle(depBundle, bundles, sortedBundles, dependencyStack);
             assert sortedBundles.contains(depBundle);
@@ -653,6 +659,9 @@ static void checkBundleJarHell(Set<URL> classpath, Bundle bundle, Map<String, Se
             Set<URL> urls = new HashSet<>();
             for (String extendedPlugin : exts) {
                 Set<URL> pluginUrls = transitiveUrls.get(extendedPlugin);
+                if (pluginUrls == null && bundle.plugin.isExtendedPluginOptional(extendedPlugin)) {
+                    continue;
+                }
                 assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin;
 
                 Set<URL> intersection = new HashSet<>(urls);
@@ -704,6 +713,10 @@ private Plugin loadBundle(Bundle bundle, Map<String, Plugin> loaded) {
         List<ClassLoader> extendedLoaders = new ArrayList<>();
         for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) {
             Plugin extendedPlugin = loaded.get(extendedPluginName);
+            if (extendedPlugin == null && bundle.plugin.isExtendedPluginOptional(extendedPluginName)) {
+                // extended plugin is optional and is not installed
+                continue;
+            }
             assert extendedPlugin != null;
             if (ExtensiblePlugin.class.isInstance(extendedPlugin) == false) {
                 throw new IllegalStateException("Plugin [" + name + "] cannot extend non-extensible plugin [" + extendedPluginName + "]");
diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java
index 12c7dc870c104..76294d85c64d4 100644
--- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java
+++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java
@@ -44,6 +44,7 @@
 import org.opensearch.semver.SemverRange;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -55,6 +56,7 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
 
 public class PluginInfoTests extends OpenSearchTestCase {
 
@@ -281,6 +283,30 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception {
         assertThat(e.getMessage(), containsString("property [classname] is missing"));
     }
 
+    public void testExtendedPluginsSingleOptionalExtension() throws IOException {
+        Path pluginDir = createTempDir().resolve("fake-plugin");
+        PluginTestUtil.writePluginProperties(
+            pluginDir,
+            "description",
+            "fake desc",
+            "name",
+            "my_plugin",
+            "version",
+            "1.0",
+            "opensearch.version",
+            Version.CURRENT.toString(),
+            "java.version",
+            System.getProperty("java.specification.version"),
+            "classname",
+            "FakePlugin",
+            "extended.plugins",
+            "foo;optional=true"
+        );
+        PluginInfo info = PluginInfo.readFromProperties(pluginDir);
+        assertThat(info.getExtendedPlugins(), contains("foo"));
+        assertThat(info.isExtendedPluginOptional("foo"), is(true));
+    }
+
     public void testExtendedPluginsSingleExtension() throws Exception {
         Path pluginDir = createTempDir().resolve("fake-plugin");
         PluginTestUtil.writePluginProperties(
@@ -302,6 +328,7 @@ public void testExtendedPluginsSingleExtension() throws Exception {
         );
         PluginInfo info = PluginInfo.readFromProperties(pluginDir);
         assertThat(info.getExtendedPlugins(), contains("foo"));
+        assertThat(info.isExtendedPluginOptional("foo"), is(false));
     }
 
     public void testExtendedPluginsMultipleExtensions() throws Exception {
diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java
index bd9ee33856f14..f5702fa1a7ade 100644
--- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java
+++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java
@@ -361,7 +361,7 @@ public void testSortBundlesNoDeps() throws Exception {
         assertThat(sortedBundles, Matchers.contains(bundle1, bundle2, bundle3));
     }
 
-    public void testSortBundlesMissingDep() throws Exception {
+    public void testSortBundlesMissingRequiredDep() throws Exception {
         Path pluginDir = createTempDir();
         PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", "MyPlugin", Collections.singletonList("dne"), false);
         PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir);
@@ -372,6 +372,33 @@ public void testSortBundlesMissingDep() throws Exception {
         assertEquals("Missing plugin [dne], dependency of [foo]", e.getMessage());
     }
 
+    public void testSortBundlesMissingOptionalDep() throws Exception {
+        try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(PluginsService.class))) {
+            mockLogAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                    "[.test] warning",
+                    "org.opensearch.plugins.PluginsService",
+                    Level.WARN,
+                    "Missing plugin [dne], dependency of [foo]"
+                )
+            );
+            Path pluginDir = createTempDir();
+            PluginInfo info = new PluginInfo(
+                "foo",
+                "desc",
+                "1.0",
+                Version.CURRENT,
+                "1.8",
+                "MyPlugin",
+                Collections.singletonList("dne;optional=true"),
+                false
+            );
+            PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir);
+            PluginsService.sortBundles(Collections.singleton(bundle));
+            mockLogAppender.assertAllExpectationsMatched();
+        }
+    }
+
     public void testSortBundlesCommonDep() throws Exception {
         Path pluginDir = createTempDir();
         Set<PluginsService.Bundle> bundles = new LinkedHashSet<>(); // control iteration order

From c0f7806753c74776465bb483f0201bc5897c15a2 Mon Sep 17 00:00:00 2001
From: Craig Perkins <cwperx@amazon.com>
Date: Fri, 3 Jan 2025 21:48:15 -0500
Subject: [PATCH 20/61] Change version in PluginInfo to V_2_19_0 after backport
 to 2.x is merged (#16947)

Signed-off-by: Craig Perkins <cwperx@amazon.com>
---
 server/src/main/java/org/opensearch/plugins/PluginInfo.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
index 7173a653ebc9a..4ff699e8017ba 100644
--- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java
+++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java
@@ -215,7 +215,7 @@ public PluginInfo(final StreamInput in) throws IOException {
         this.customFolderName = in.readString();
         this.extendedPlugins = in.readStringList();
         this.hasNativeController = in.readBoolean();
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.V_2_19_0)) {
             this.optionalExtendedPlugins = in.readStringList();
         } else {
             this.optionalExtendedPlugins = new ArrayList<>();
@@ -250,7 +250,7 @@ This works for currently supported range notations (=,~)
         }
         out.writeStringCollection(extendedPlugins);
         out.writeBoolean(hasNativeController);
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.V_2_19_0)) {
             out.writeStringCollection(optionalExtendedPlugins);
         }
     }

From d7641ca8788441e384fbde6c58b6f2530ec8772d Mon Sep 17 00:00:00 2001
From: Bharathwaj G <bharath78910@gmail.com>
Date: Mon, 6 Jan 2025 14:06:16 +0530
Subject: [PATCH 21/61] Support object fields in star-tree index (#16728)

---------

Signed-off-by: bharath-techie <bharath78910@gmail.com>
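
As a minimal sketch (not part of the patch; the class name is illustrative), a dotted composite-index
field such as "nested.nested1.status" expands into the set of object paths that must reject arrays,
mirroring the lookup that the MapperService change below builds:

    import java.util.LinkedHashSet;
    import java.util.Set;

    public final class CompositePathExpansion {
        public static void main(String[] args) {
            String field = "nested.nested1.status";
            Set<String> paths = new LinkedHashSet<>();
            StringBuilder path = new StringBuilder();
            for (String part : field.split("\\.")) {
                if (path.length() > 0) {
                    path.append('.');
                }
                path.append(part);
                paths.add(path.toString());
            }
            // Prints [nested, nested.nested1, nested.nested1.status]; DocumentParser blocks
            // arrays at any of these paths when the index is a composite (star-tree) index.
            System.out.println(paths);
        }
    }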
---
 CHANGELOG.md                                  |   2 +
 .../index/mapper/StarTreeMapperIT.java        | 371 +++++++++++++++++-
 .../index/mapper/DocumentParser.java          |  14 +-
 .../index/mapper/MapperService.java           |  25 ++
 .../index/mapper/StarTreeMapper.java          |  43 +-
 .../index/mapper/StarTreeMapperTests.java     |  79 +++-
 6 files changed, 528 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5f813fecf66cf..0efb53beb6e31 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/))
 - Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802))
 - Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534))
+- Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/))
+- Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
index 3f9053576329c..1d01f717aad1f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
@@ -26,6 +26,8 @@
 import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit;
 import org.opensearch.index.compositeindex.datacube.DateDimension;
 import org.opensearch.index.compositeindex.datacube.MetricStat;
+import org.opensearch.index.compositeindex.datacube.NumericDimension;
+import org.opensearch.index.compositeindex.datacube.OrdinalDimension;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings;
 import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter;
@@ -41,6 +43,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -121,6 +124,187 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool
         }
     }
 
+    private static XContentBuilder createNestedTestMapping() {
+        try {
+            return jsonBuilder().startObject()
+                .startObject("composite")
+                .startObject("startree-1")
+                .field("type", "star_tree")
+                .startObject("config")
+                .startObject("date_dimension")
+                .field("name", "timestamp")
+                .endObject()
+                .startArray("ordered_dimensions")
+                .startObject()
+                .field("name", "nested.nested1.status")
+                .endObject()
+                .startObject()
+                .field("name", "nested.nested1.keyword_dv")
+                .endObject()
+                .endArray()
+                .startArray("metrics")
+                .startObject()
+                .field("name", "nested3.numeric_dv")
+                .endObject()
+                .endArray()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("properties")
+                .startObject("timestamp")
+                .field("type", "date")
+                .endObject()
+                .startObject("nested3")
+                .startObject("properties")
+                .startObject("numeric_dv")
+                .field("type", "integer")
+                .field("doc_values", true)
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("numeric")
+                .field("type", "integer")
+                .field("doc_values", false)
+                .endObject()
+                .startObject("nested")
+                .startObject("properties")
+                .startObject("nested1")
+                .startObject("properties")
+                .startObject("status")
+                .field("type", "integer")
+                .field("doc_values", true)
+                .endObject()
+                .startObject("keyword_dv")
+                .field("type", "keyword")
+                .field("doc_values", true)
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("nested-not-startree")
+                .startObject("properties")
+                .startObject("nested1")
+                .startObject("properties")
+                .startObject("status")
+                .field("type", "integer")
+                .field("doc_values", true)
+                .endObject()
+                .startObject("keyword_dv")
+                .field("type", "keyword")
+                .field("doc_values", true)
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("keyword")
+                .field("type", "keyword")
+                .field("doc_values", false)
+                .endObject()
+                .startObject("ip")
+                .field("type", "ip")
+                .field("doc_values", false)
+                .endObject()
+                .endObject()
+                .endObject();
+        } catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    private static XContentBuilder createNestedTestMappingForArray() {
+        try {
+            return jsonBuilder().startObject()
+                .startObject("composite")
+                .startObject("startree-1")
+                .field("type", "star_tree")
+                .startObject("config")
+                .startObject("date_dimension")
+                .field("name", "timestamp")
+                .endObject()
+                .startArray("ordered_dimensions")
+                .startObject()
+                .field("name", "status")
+                .endObject()
+                .startObject()
+                .field("name", "nested.nested1.keyword_dv")
+                .endObject()
+                .endArray()
+                .startArray("metrics")
+                .startObject()
+                .field("name", "nested3.numeric_dv")
+                .endObject()
+                .endArray()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("properties")
+                .startObject("timestamp")
+                .field("type", "date")
+                .endObject()
+                .startObject("status")
+                .field("type", "integer")
+                .endObject()
+                .startObject("nested3")
+                .startObject("properties")
+                .startObject("numeric_dv")
+                .field("type", "integer")
+                .field("doc_values", true)
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("numeric")
+                .field("type", "integer")
+                .field("doc_values", false)
+                .endObject()
+                .startObject("nested")
+                .startObject("properties")
+                .startObject("nested1")
+                .startObject("properties")
+                .startObject("status")
+                .field("type", "integer")
+                .field("doc_values", true)
+                .endObject()
+                .startObject("keyword_dv")
+                .field("type", "keyword")
+                .field("doc_values", true)
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("nested-not-startree")
+                .startObject("properties")
+                .startObject("nested1")
+                .startObject("properties")
+                .startObject("status")
+                .field("type", "integer")
+                .field("doc_values", true)
+                .endObject()
+                .startObject("keyword_dv")
+                .field("type", "keyword")
+                .field("doc_values", true)
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("keyword")
+                .field("type", "keyword")
+                .field("doc_values", false)
+                .endObject()
+                .startObject("ip")
+                .field("type", "ip")
+                .field("doc_values", false)
+                .endObject()
+                .endObject()
+                .endObject();
+        } catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
     private static XContentBuilder createDateTestMapping(boolean duplicate) {
         try {
             return jsonBuilder().startObject()
@@ -475,6 +659,46 @@ public void testValidCompositeIndexWithDates() {
         }
     }
 
+    public void testValidCompositeIndexWithNestedFields() {
+        prepareCreate(TEST_INDEX).setMapping(createNestedTestMapping()).setSettings(settings).get();
+        Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
+        for (IndicesService service : dataNodeInstances) {
+            final Index index = resolveIndex("test");
+            if (service.hasIndex(index)) {
+                IndexService indexService = service.indexService(index);
+                Set<CompositeMappedFieldType> fts = indexService.mapperService().getCompositeFieldTypes();
+
+                for (CompositeMappedFieldType ft : fts) {
+                    assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType);
+                    StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft;
+                    assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField());
+                    assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
+                    DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
+                    List<DateTimeUnitRounding> expectedTimeUnits = Arrays.asList(
+                        new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR),
+                        DataCubeDateTimeUnit.HALF_HOUR_OF_DAY
+                    );
+                    for (int i = 0; i < dateDim.getIntervals().size(); i++) {
+                        assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName());
+                    }
+                    assertEquals("nested.nested1.status", starTreeFieldType.getDimensions().get(1).getField());
+                    assertTrue(starTreeFieldType.getDimensions().get(1) instanceof NumericDimension);
+                    assertEquals("nested.nested1.keyword_dv", starTreeFieldType.getDimensions().get(2).getField());
+                    assertTrue(starTreeFieldType.getDimensions().get(2) instanceof OrdinalDimension);
+                    assertEquals("nested3.numeric_dv", starTreeFieldType.getMetrics().get(0).getField());
+                    List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG);
+                    assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+                    assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
+                    assertEquals(
+                        StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP,
+                        starTreeFieldType.getStarTreeConfig().getBuildMode()
+                    );
+                    assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
+                }
+            }
+        }
+    }
+
     public void testValidCompositeIndexWithDuplicateDates() {
         prepareCreate(TEST_INDEX).setMapping(createDateTestMapping(true)).setSettings(settings).get();
         Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
@@ -563,11 +787,156 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException {
             () -> client().prepareIndex(TEST_INDEX).setSource(doc).get()
         );
         assertEquals(
-            "object mapping for [_doc] with array for [numeric_dv] cannot be accepted as field is also part of composite index mapping which does not accept arrays",
+            "object mapping for [_doc] with array for [numeric_dv] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays",
             ex.getMessage()
         );
     }
 
+    public void testCompositeIndexWithArraysInNestedCompositeField() throws IOException {
+        // here nested.nested1.status is part of the composite field but "nested" field itself is an array
+        prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get();
+        // Attempt to index a document with an array field
+        XContentBuilder doc = jsonBuilder().startObject()
+            .field("timestamp", "2023-06-01T12:00:00Z")
+            .startArray("nested")
+            .startObject()
+            .startArray("nested1")
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .endArray()
+            .endObject()
+            .endArray()
+            .endObject();
+        // Index the document and refresh
+        MapperParsingException ex = expectThrows(
+            MapperParsingException.class,
+            () -> client().prepareIndex(TEST_INDEX).setSource(doc).get()
+        );
+        assertEquals(
+            "object mapping for [_doc] with array for [nested] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays",
+            ex.getMessage()
+        );
+    }
+
+    public void testCompositeIndexWithArraysInChildNestedCompositeField() throws IOException {
+        prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get();
+        // here nested.nested1.status is part of the composite field but "nested.nested1" field is an array
+        XContentBuilder doc = jsonBuilder().startObject()
+            .field("timestamp", "2023-06-01T12:00:00Z")
+            .startObject("nested")
+            .startArray("nested1")
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .endArray()
+            .endObject()
+            .endObject();
+        // Index the document and refresh
+        MapperParsingException ex = expectThrows(
+            MapperParsingException.class,
+            () -> client().prepareIndex(TEST_INDEX).setSource(doc).get()
+        );
+        assertEquals(
+            "object mapping for [nested] with array for [nested1] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays",
+            ex.getMessage()
+        );
+    }
+
+    public void testCompositeIndexWithArraysInNestedCompositeFieldSameNameAsNormalField() throws IOException {
+        prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMappingForArray()).get();
+        // here status is part of the composite field but "nested.nested1.status" field is an array which is not
+        // part of composite field
+        XContentBuilder doc = jsonBuilder().startObject()
+            .field("timestamp", "2023-06-01T12:00:00Z")
+            .startObject("nested")
+            .startObject("nested1")
+            .startArray("status")
+            .value(10)
+            .value(20)
+            .value(30)
+            .endArray()
+            .endObject()
+            .endObject()
+            .field("status", "200")
+            .endObject();
+        // Index the document and refresh
+        IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get();
+
+        assertEquals(RestStatus.CREATED, indexResponse.status());
+
+        client().admin().indices().prepareRefresh(TEST_INDEX).get();
+        // Verify the document was indexed
+        SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get();
+
+        assertEquals(1, searchResponse.getHits().getTotalHits().value);
+
+        // Verify the values in the indexed document
+        SearchHit hit = searchResponse.getHits().getAt(0);
+        assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp"));
+
+        int values = Integer.parseInt((String) hit.getSourceAsMap().get("status"));
+        assertEquals(200, values);
+    }
+
+    public void testCompositeIndexWithNestedArraysInNonCompositeField() throws IOException {
+        prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get();
+        // Attempt to index a document with an array field
+        XContentBuilder doc = jsonBuilder().startObject()
+            .field("timestamp", "2023-06-01T12:00:00Z")
+            .startObject("nested-not-startree")
+            .startArray("nested1")
+            .startObject()
+            .field("status", 10)
+            .endObject()
+            .startObject()
+            .field("status", 20)
+            .endObject()
+            .startObject()
+            .field("status", 30)
+            .endObject()
+            .endArray()
+            .endObject()
+            .endObject();
+
+        // Index the document and refresh
+        IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get();
+
+        assertEquals(RestStatus.CREATED, indexResponse.status());
+
+        client().admin().indices().prepareRefresh(TEST_INDEX).get();
+        // Verify the document was indexed
+        SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get();
+
+        assertEquals(1, searchResponse.getHits().getTotalHits().value);
+
+        // Verify the values in the indexed document
+        SearchHit hit = searchResponse.getHits().getAt(0);
+        assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp"));
+
+        List<Object> values = (List<Object>) ((Map<String, Object>) (hit.getSourceAsMap().get("nested-not-startree"))).get("nested1");
+        assertEquals(3, values.size());
+        int i = 1;
+        for (Object val : values) {
+            Map<String, Object> valMap = (Map<String, Object>) val;
+            assertEquals(10 * i, valMap.get("status"));
+            i++;
+        }
+    }
+
     public void testCompositeIndexWithArraysInNonCompositeField() throws IOException {
         prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get();
         // Attempt to index a document with an array field
diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java
index 50ff816695156..134baa70f80c2 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java
@@ -661,12 +661,22 @@ private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapp
         throws IOException {
         XContentParser parser = context.parser();
         XContentParser.Token token;
+        String path = context.path().pathAsText(arrayFieldName);
+        boolean isNested = path.contains(".") || context.mapperService().isCompositeIndexFieldNestedField(path);
         // block array values for composite index fields
-        if (context.indexSettings().isCompositeIndex() && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) {
+        // Assume original index has 2 fields - status , nested.nested1.status
+        // case 1 : if status is part of composite index and nested.nested1.status is not part of composite index,
+        // then nested.nested1.status/nested.nested1/nested array should not be blocked
+        // case 2 : if nested.nested1.status is part of composite index and status is not part of composite index,
+        // then arrays in nested/nested.nested1 and nested.nested1.status fields should be blocked
+        // but arrays in status should not be blocked
+        if (context.indexSettings().isCompositeIndex()
+            && ((isNested == false && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName))
+                || (isNested && context.mapperService().isCompositeIndexFieldNestedField(path)))) {
             throw new MapperParsingException(
                 String.format(
                     Locale.ROOT,
-                    "object mapping for [%s] with array for [%s] cannot be accepted as field is also part of composite index mapping which does not accept arrays",
+                    "object mapping for [%s] with array for [%s] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays",
                     mapper.name(),
                     arrayFieldName
                 )
diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java
index 84b0b1d69432d..5a7c6a0102052 100644
--- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java
+++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java
@@ -228,6 +228,7 @@ public enum MergeReason {
 
     private volatile Set<CompositeMappedFieldType> compositeMappedFieldTypes;
     private volatile Set<String> fieldsPartOfCompositeMappings;
+    private volatile Set<String> nestedFieldsPartOfCompositeMappings;
 
     public MapperService(
         IndexSettings indexSettings,
@@ -554,10 +555,29 @@ private synchronized Map<String, DocumentMapper> internalMerge(DocumentMapper ma
 
     private void buildCompositeFieldLookup() {
         Set<String> fieldsPartOfCompositeMappings = new HashSet<>();
+        Set<String> nestedFieldsPartOfCompositeMappings = new HashSet<>();
+
         for (CompositeMappedFieldType fieldType : compositeMappedFieldTypes) {
             fieldsPartOfCompositeMappings.addAll(fieldType.fields());
+
+            for (String field : fieldType.fields()) {
+                String[] parts = field.split("\\.");
+                if (parts.length > 1) {
+                    StringBuilder path = new StringBuilder();
+                    for (int i = 0; i < parts.length; i++) {
+                        if (i == 0) {
+                            path.append(parts[i]);
+                        } else {
+                            path.append(".").append(parts[i]);
+                        }
+                        nestedFieldsPartOfCompositeMappings.add(path.toString());
+                    }
+                }
+            }
         }
+
         this.fieldsPartOfCompositeMappings = fieldsPartOfCompositeMappings;
+        this.nestedFieldsPartOfCompositeMappings = nestedFieldsPartOfCompositeMappings;
     }
 
     private boolean assertSerialization(DocumentMapper mapper) {
@@ -690,6 +710,11 @@ public boolean isFieldPartOfCompositeIndex(String field) {
         return fieldsPartOfCompositeMappings.contains(field);
     }
 
+    public boolean isCompositeIndexFieldNestedField(String field) {
+        return nestedFieldsPartOfCompositeMappings.contains(field);
+
+    }
+
     public ObjectMapper getObjectMapper(String name) {
         return this.mapper == null ? null : this.mapper.objectMappers().get(name);
     }
diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java
index 40f05a8b76755..7b361e12330a3 100644
--- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java
@@ -23,6 +23,7 @@
 import org.opensearch.search.lookup.SearchLookup;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
@@ -431,8 +432,46 @@ private static boolean isBuilderAllowedForMetric(Mapper.Builder builder) {
             return builder.isDataCubeMetricSupported();
         }
 
-        private Optional<Mapper.Builder> findMapperBuilderByName(String field, List<Mapper.Builder> mappersBuilders) {
-            return mappersBuilders.stream().filter(builder -> builder.name().equals(field)).findFirst();
+        private Optional<Mapper.Builder> findMapperBuilderByName(String name, List<Mapper.Builder> mappersBuilders) {
+            String[] parts = name.split("\\.");
+
+            // Start with the top-level builders
+            Optional<Mapper.Builder> currentBuilder = mappersBuilders.stream()
+                .filter(builder -> builder.name().equals(parts[0]))
+                .findFirst();
+
+            // If we can't find the first part, or if there's only one part, return the result
+            if (currentBuilder.isEmpty() || parts.length == 1) {
+                return currentBuilder;
+            }
+
+            // Navigate through the nested structure
+            try {
+                Mapper.Builder builder = currentBuilder.get();
+                for (int i = 1; i < parts.length; i++) {
+                    List<Mapper.Builder> childBuilders = getChildBuilders(builder);
+                    int finalI = i;
+                    builder = childBuilders.stream()
+                        .filter(b -> b.name().equals(parts[finalI]))
+                        .findFirst()
+                        .orElseThrow(
+                            () -> new IllegalArgumentException(
+                                String.format(Locale.ROOT, "Could not find nested field [%s] in path [%s]", parts[finalI], name)
+                            )
+                        );
+                }
+                return Optional.of(builder);
+            } catch (Exception e) {
+                return Optional.empty();
+            }
+        }
+
+        // Helper method to get child builders from a parent builder
+        private List<Mapper.Builder> getChildBuilders(Mapper.Builder builder) {
+            if (builder instanceof ObjectMapper.Builder) {
+                return ((ObjectMapper.Builder) builder).mappersBuilders;
+            }
+            return Collections.emptyList();
         }
 
         public Builder(String name, ObjectMapper.Builder objBuilder) {
diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
index 333cdbcab05c5..684704ad65b0a 100644
--- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java
@@ -111,7 +111,7 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException {
             () -> mapper.parse(source(b -> b.startArray("status").value(0).value(1).endArray()))
         );
         assertEquals(
-            "object mapping for [_doc] with array for [status] cannot be accepted as field is also part of composite index mapping which does not accept arrays",
+            "object mapping for [_doc] with array for [status] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays",
             ex.getMessage()
         );
         ParsedDocument doc = mapper.parse(source(b -> b.startArray("size").value(0).value(1).endArray()));
@@ -284,6 +284,33 @@ public void testValidStarTreeDateDims() throws IOException {
         }
     }
 
+    public void testValidStarTreeNestedFields() throws IOException {
+        MapperService mapperService = createMapperService(getMinMappingWithNestedField());
+        Set<CompositeMappedFieldType> compositeFieldTypes = mapperService.getCompositeFieldTypes();
+        for (CompositeMappedFieldType type : compositeFieldTypes) {
+            StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type;
+            assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField());
+            assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
+            DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
+            List<String> expectedDimensionFields = Arrays.asList("@timestamp_minute", "@timestamp_half-hour");
+            assertEquals(expectedDimensionFields, dateDim.getSubDimensionNames());
+            List<DateTimeUnitRounding> expectedTimeUnits = Arrays.asList(
+                new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR),
+                DataCubeDateTimeUnit.HALF_HOUR_OF_DAY
+            );
+            for (int i = 0; i < expectedTimeUnits.size(); i++) {
+                assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName());
+            }
+            assertEquals("nested.status", starTreeFieldType.getDimensions().get(1).getField());
+            assertEquals("nested.status", starTreeFieldType.getMetrics().get(0).getField());
+            List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG);
+            assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+            assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
+            assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode());
+            assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
+        }
+    }
+
     public void testInValidStarTreeMinDims() throws IOException {
         MapperParsingException ex = expectThrows(
             MapperParsingException.class,
@@ -1047,6 +1074,56 @@ private XContentBuilder getMinMappingWith2StarTrees() throws IOException {
         });
     }
 
+    private XContentBuilder getMinMappingWithNestedField() throws IOException {
+        return topMapping(b -> {
+            b.startObject("composite");
+            b.startObject("startree");
+            b.field("type", "star_tree");
+            b.startObject("config");
+
+            b.startArray("ordered_dimensions");
+            b.startObject();
+            b.field("name", "@timestamp");
+            b.endObject();
+            b.startObject();
+            b.field("name", "nested.status");
+            b.endObject();
+            b.endArray();
+
+            b.startArray("metrics");
+            b.startObject();
+            b.field("name", "nested.status");
+            b.endObject();
+            b.startObject();
+            b.field("name", "metric_field");
+            b.endObject();
+            b.endArray();
+
+            b.endObject();
+            b.endObject();
+
+            b.endObject();
+            b.startObject("properties");
+            b.startObject("@timestamp");
+            b.field("type", "date");
+            b.endObject();
+            b.startObject("nested");
+            b.startObject("properties");
+            b.startObject("status");
+            b.field("type", "integer");
+            b.endObject();
+            b.endObject();
+            b.endObject();
+            b.startObject("metric_field");
+            b.field("type", "integer");
+            b.endObject();
+            b.startObject("keyword1");
+            b.field("type", "keyword");
+            b.endObject();
+            b.endObject();
+        });
+    }
+
     private XContentBuilder getInvalidMapping(
         boolean singleDim,
         boolean invalidSkipDims,

From 4a53ff24adbec1d5aeb3d73548171870a3de925d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 09:57:50 -0500
Subject: [PATCH 22/61] Bump ch.qos.logback:logback-core from 1.5.12 to 1.5.16
 in /test/fixtures/hdfs-fixture (#16951)

* Bump ch.qos.logback:logback-core in /test/fixtures/hdfs-fixture

Bumps [ch.qos.logback:logback-core](https://github.com/qos-ch/logback) from 1.5.12 to 1.5.16.
- [Commits](https://github.com/qos-ch/logback/compare/v_1.5.12...v_1.5.16)

---
updated-dependencies:
- dependency-name: ch.qos.logback:logback-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                            | 1 +
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0efb53beb6e31..82bf9dd0fea0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896))
 - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918))
 - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919))
+- Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 02aab575bbaf0..bb2b7ebafdf81 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -74,7 +74,7 @@ dependencies {
   api 'org.apache.zookeeper:zookeeper:3.9.3'
   api "org.apache.commons:commons-text:1.13.0"
   api "commons-net:commons-net:3.11.1"
-  api "ch.qos.logback:logback-core:1.5.12"
+  api "ch.qos.logback:logback-core:1.5.16"
   api "ch.qos.logback:logback-classic:1.5.15"
   api "org.jboss.xnio:xnio-nio:3.8.16.Final"
   api 'org.jline:jline:3.28.0'

From e73ffdf1d1a11587f2d25ba69ddb46fc25994919 Mon Sep 17 00:00:00 2001
From: Ruirui Zhang <mariazrr@amazon.com>
Date: Mon, 6 Jan 2025 10:36:36 -0800
Subject: [PATCH 23/61] [Workload Management] Add Workload Management IT
 (#16359)

* add workload management IT
Signed-off-by: Ruirui Zhang <mariazrr@amazon.com>

* address comments
Signed-off-by: Ruirui Zhang <mariazrr@amazon.com>

---------

Signed-off-by: Ruirui Zhang <mariazrr@amazon.com>
---
 CHANGELOG.md                                  |   1 +
 .../backpressure/SearchBackpressureIT.java    |  10 +-
 .../opensearch/wlm/WorkloadManagementIT.java  | 434 ++++++++++++++++++
 3 files changed, 442 insertions(+), 3 deletions(-)
 create mode 100644 server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 82bf9dd0fea0a..99bfecfc0eac6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233))
 - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600))
 - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489))
+- [Workload Management] Add Workload Management IT ([#16359](https://github.com/opensearch-project/OpenSearch/pull/16359))
 - Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271))
 - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)).
 - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/))
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java
index 40c9301ef4bce..d200b9177353a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java
@@ -314,7 +314,7 @@ public void testSearchCancellationWithBackpressureDisabled() throws InterruptedE
         assertNull("SearchShardTask shouldn't have cancelled for monitor_only mode", caughtException);
     }
 
-    private static class ExceptionCatchingListener implements ActionListener<TestResponse> {
+    public static class ExceptionCatchingListener implements ActionListener<TestResponse> {
         private final CountDownLatch latch;
         private Exception exception = null;
 
@@ -333,7 +333,11 @@ public void onFailure(Exception e) {
             latch.countDown();
         }
 
-        private Exception getException() {
+        public CountDownLatch getLatch() {
+            return latch;
+        }
+
+        public Exception getException() {
             return exception;
         }
     }
@@ -349,7 +353,7 @@ private Supplier<String> descriptionSupplier(String description) {
         return () -> description;
     }
 
-    interface TaskFactory<T extends Task> {
+    public interface TaskFactory<T extends Task> {
         T createTask(long id, String type, String action, String description, TaskId parentTaskId, Map<String, String> headers);
     }
 
diff --git a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java
new file mode 100644
index 0000000000000..6b68a83da94e2
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java
@@ -0,0 +1,434 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.wlm;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.ActionType;
+import org.opensearch.action.search.SearchTask;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.ClusterStateUpdateTask;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.tasks.TaskCancelledException;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.plugins.ActionPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.search.backpressure.SearchBackpressureIT.ExceptionCatchingListener;
+import org.opensearch.search.backpressure.SearchBackpressureIT.TaskFactory;
+import org.opensearch.search.backpressure.SearchBackpressureIT.TestResponse;
+import org.opensearch.tasks.CancellableTask;
+import org.opensearch.tasks.Task;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.threadpool.ThreadPool.Names.SAME;
+import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class WorkloadManagementIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
+    final static String PUT = "PUT";
+    final static String MEMORY = "MEMORY";
+    final static String CPU = "CPU";
+    final static String ENABLED = "enabled";
+    final static String DELETE = "DELETE";
+    private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.SECONDS);
+
+    public WorkloadManagementIT(Settings nodeSettings) {
+        super(nodeSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());
+        plugins.add(TestClusterUpdatePlugin.class);
+        return plugins;
+    }
+
+    @Before
+    public final void setupNodeSettings() {
+        Settings request = Settings.builder()
+            .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.getKey(), 0.8)
+            .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD.getKey(), 0.9)
+            .put(WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD.getKey(), 0.8)
+            .put(WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.getKey(), 0.9)
+            .build();
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get());
+    }
+
+    @After
+    public final void cleanupNodeSettings() {
+        assertAcked(
+            client().admin()
+                .cluster()
+                .prepareUpdateSettings()
+                .setPersistentSettings(Settings.builder().putNull("*"))
+                .setTransientSettings(Settings.builder().putNull("*"))
+        );
+    }
+
+    public void testHighCPUInEnforcedMode() throws InterruptedException {
+        Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build();
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get());
+        QueryGroup queryGroup = new QueryGroup(
+            "name",
+            new MutableQueryGroupFragment(
+                MutableQueryGroupFragment.ResiliencyMode.ENFORCED,
+                Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01)
+            )
+        );
+        updateQueryGroupInClusterState(PUT, queryGroup);
+        Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id());
+        assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException);
+        MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class));
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public void testHighCPUInMonitorMode() throws InterruptedException {
+        QueryGroup queryGroup = new QueryGroup(
+            "name",
+            new MutableQueryGroupFragment(
+                MutableQueryGroupFragment.ResiliencyMode.ENFORCED,
+                Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01)
+            )
+        );
+        updateQueryGroupInClusterState(PUT, queryGroup);
+        Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id());
+        assertNull(caughtException);
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public void testHighMemoryInEnforcedMode() throws InterruptedException {
+        Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build();
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get());
+        QueryGroup queryGroup = new QueryGroup(
+            "name",
+            new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01))
+        );
+        updateQueryGroupInClusterState(PUT, queryGroup);
+        Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id());
+        assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException);
+        MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class));
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public void testHighMemoryInMonitorMode() throws InterruptedException {
+        QueryGroup queryGroup = new QueryGroup(
+            "name",
+            new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01))
+        );
+        updateQueryGroupInClusterState(PUT, queryGroup);
+        Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id());
+        assertNull("SearchTask should have been cancelled with TaskCancelledException", caughtException);
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public void testNoCancellation() throws InterruptedException {
+        QueryGroup queryGroup = new QueryGroup(
+            "name",
+            new MutableQueryGroupFragment(
+                MutableQueryGroupFragment.ResiliencyMode.ENFORCED,
+                Map.of(ResourceType.CPU, 0.8, ResourceType.MEMORY, 0.8)
+            )
+        );
+        updateQueryGroupInClusterState(PUT, queryGroup);
+        Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id());
+        assertNull(caughtException);
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public Exception executeQueryGroupTask(String resourceType, String queryGroupId) throws InterruptedException {
+        ExceptionCatchingListener listener = new ExceptionCatchingListener();
+        client().execute(
+            TestQueryGroupTaskTransportAction.ACTION,
+            new TestQueryGroupTaskRequest(
+                resourceType,
+                queryGroupId,
+                (TaskFactory<Task>) (id, type, action, description, parentTaskId, headers) -> new SearchTask(
+                    id,
+                    type,
+                    action,
+                    () -> description,
+                    parentTaskId,
+                    headers
+                )
+            ),
+            listener
+        );
+        assertTrue(listener.getLatch().await(TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS));
+        return listener.getException();
+    }
+
+    public void updateQueryGroupInClusterState(String method, QueryGroup queryGroup) throws InterruptedException {
+        ExceptionCatchingListener listener = new ExceptionCatchingListener();
+        client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(queryGroup, method), listener);
+        assertTrue(listener.getLatch().await(TIMEOUT.getSeconds(), TimeUnit.SECONDS));
+        assertEquals(0, listener.getLatch().getCount());
+    }
+
+    public static class TestClusterUpdateRequest extends ClusterManagerNodeRequest<TestClusterUpdateRequest> {
+        final private String method;
+        final private QueryGroup queryGroup;
+
+        public TestClusterUpdateRequest(QueryGroup queryGroup, String method) {
+            this.method = method;
+            this.queryGroup = queryGroup;
+        }
+
+        public TestClusterUpdateRequest(StreamInput in) throws IOException {
+            super(in);
+            this.method = in.readString();
+            this.queryGroup = new QueryGroup(in);
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(method);
+            queryGroup.writeTo(out);
+        }
+
+        public QueryGroup getQueryGroup() {
+            return queryGroup;
+        }
+
+        public String getMethod() {
+            return method;
+        }
+    }
+
+    public static class TestClusterUpdateTransportAction extends TransportClusterManagerNodeAction<TestClusterUpdateRequest, TestResponse> {
+        public static final ActionType<TestResponse> ACTION = new ActionType<>("internal::test_cluster_update_action", TestResponse::new);
+
+        @Inject
+        public TestClusterUpdateTransportAction(
+            ThreadPool threadPool,
+            TransportService transportService,
+            ActionFilters actionFilters,
+            IndexNameExpressionResolver indexNameExpressionResolver,
+            ClusterService clusterService
+        ) {
+            super(
+                ACTION.name(),
+                transportService,
+                clusterService,
+                threadPool,
+                actionFilters,
+                TestClusterUpdateRequest::new,
+                indexNameExpressionResolver
+            );
+        }
+
+        @Override
+        protected String executor() {
+            return SAME;
+        }
+
+        @Override
+        protected TestResponse read(StreamInput in) throws IOException {
+            return new TestResponse(in);
+        }
+
+        @Override
+        protected ClusterBlockException checkBlock(TestClusterUpdateRequest request, ClusterState state) {
+            return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+        }
+
+        @Override
+        protected void clusterManagerOperation(
+            TestClusterUpdateRequest request,
+            ClusterState clusterState,
+            ActionListener<TestResponse> listener
+        ) {
+            clusterService.submitStateUpdateTask("query-group-persistence-service", new ClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    Map<String, QueryGroup> currentGroups = currentState.metadata().queryGroups();
+                    QueryGroup queryGroup = request.getQueryGroup();
+                    String id = queryGroup.get_id();
+                    String method = request.getMethod();
+                    Metadata metadata;
+                    if (method.equals(PUT)) { // create
+                        metadata = Metadata.builder(currentState.metadata()).put(queryGroup).build();
+                    } else { // delete
+                        metadata = Metadata.builder(currentState.metadata()).remove(currentGroups.get(id)).build();
+                    }
+                    return ClusterState.builder(currentState).metadata(metadata).build();
+                }
+
+                @Override
+                public void onFailure(String source, Exception e) {
+                    listener.onFailure(e);
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    listener.onResponse(new TestResponse());
+                }
+            });
+        }
+    }
+
+    public static class TestQueryGroupTaskRequest<T extends Task> extends ActionRequest {
+        private final String type;
+        private final String queryGroupId;
+        private TaskFactory<T> taskFactory;
+
+        public TestQueryGroupTaskRequest(String type, String queryGroupId, TaskFactory<T> taskFactory) {
+            this.type = type;
+            this.queryGroupId = queryGroupId;
+            this.taskFactory = taskFactory;
+        }
+
+        public TestQueryGroupTaskRequest(StreamInput in) throws IOException {
+            super(in);
+            this.type = in.readString();
+            this.queryGroupId = in.readString();
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        @Override
+        public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+            return taskFactory.createTask(id, type, action, "", parentTaskId, headers);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(type);
+            out.writeString(queryGroupId);
+        }
+
+        public String getType() {
+            return type;
+        }
+
+        public String getQueryGroupId() {
+            return queryGroupId;
+        }
+    }
+
+    public static class TestQueryGroupTaskTransportAction extends HandledTransportAction<TestQueryGroupTaskRequest, TestResponse> {
+        public static final ActionType<TestResponse> ACTION = new ActionType<>("internal::test_query_group_task_action", TestResponse::new);
+        private final ThreadPool threadPool;
+
+        @Inject
+        public TestQueryGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) {
+            super(ACTION.name(), transportService, actionFilters, TestQueryGroupTaskRequest::new);
+            this.threadPool = threadPool;
+        }
+
+        @Override
+        protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionListener<TestResponse> listener) {
+            threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, request.getQueryGroupId());
+            threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> {
+                try {
+                    CancellableTask cancellableTask = (CancellableTask) task;
+                    ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext());
+                    assertEquals(request.getQueryGroupId(), ((QueryGroupTask) task).getQueryGroupId());
+                    long startTime = System.nanoTime();
+                    while (System.nanoTime() - startTime < TIMEOUT.getNanos()) {
+                        doWork(request);
+                        if (cancellableTask.isCancelled()) {
+                            break;
+                        }
+                    }
+                    if (cancellableTask.isCancelled()) {
+                        throw new TaskCancelledException(cancellableTask.getReasonCancelled());
+                    } else {
+                        listener.onResponse(new TestResponse());
+                    }
+                } catch (Exception e) {
+                    listener.onFailure(e);
+                }
+            });
+        }
+
+        private void doWork(TestQueryGroupTaskRequest request) throws InterruptedException {
+            switch (request.getType()) {
+                case "CPU":
+                    long i = 0, j = 1, k = 1, iterations = 1000;
+                    do {
+                        j += i;
+                        k *= j;
+                        i++;
+                    } while (i < iterations);
+                    break;
+                case "MEMORY":
+                    int bytesToAllocate = (int) (Runtime.getRuntime().totalMemory() * 0.01);
+                    Byte[] bytes = new Byte[bytesToAllocate];
+                    int[] ints = new int[bytesToAllocate];
+                    break;
+            }
+        }
+    }
+
+    public static class TestClusterUpdatePlugin extends Plugin implements ActionPlugin {
+        @Override
+        public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+            return Arrays.asList(
+                new ActionHandler<>(TestClusterUpdateTransportAction.ACTION, TestClusterUpdateTransportAction.class),
+                new ActionHandler<>(TestQueryGroupTaskTransportAction.ACTION, TestQueryGroupTaskTransportAction.class)
+            );
+        }
+
+        @Override
+        public List<ActionType<? extends ActionResponse>> getClientActions() {
+            return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestQueryGroupTaskTransportAction.ACTION);
+        }
+    }
+}

From aca373b6b9d4f1afa9507874bdf64f8f9924f9fb Mon Sep 17 00:00:00 2001
From: Rishabh Singh <rishabhksingh@gmail.com>
Date: Mon, 6 Jan 2025 11:27:08 -0800
Subject: [PATCH 24/61] Add new benchmark config for nested workload (#16956)

Signed-off-by: Rishabh Singh <sngri@amazon.com>
---
 .github/benchmark-configs.json | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json
index b3590f8a2f942..1c80f5048a611 100644
--- a/.github/benchmark-configs.json
+++ b/.github/benchmark-configs.json
@@ -256,5 +256,21 @@
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
     },
     "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_16": {
+    "description": "Benchmarking config for NESTED workload, benchmarks nested queries with inner-hits",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "nested",
+      "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
+  }
   }
-}

From dd9695362a9d6db1c3ee2117c269f025155d4957 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 16:52:48 -0500
Subject: [PATCH 25/61] Bump com.azure:azure-core-http-netty from 1.15.5 to
 1.15.7 in /plugins/repository-azure (#16952)

* Bump com.azure:azure-core-http-netty in /plugins/repository-azure

Bumps [com.azure:azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.15.5 to 1.15.7.
- [Release notes](https://github.com/Azure/azure-sdk-for-java/releases)
- [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.15.5...azure-core-http-netty_1.15.7)

---
updated-dependencies:
- dependency-name: com.azure:azure-core-http-netty
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Updating SHAs

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                                                    | 1 +
 plugins/repository-azure/build.gradle                           | 2 +-
 .../licenses/azure-core-http-netty-1.15.5.jar.sha1              | 1 -
 .../licenses/azure-core-http-netty-1.15.7.jar.sha1              | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 99bfecfc0eac6..bcf1904db8d27 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918))
 - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919))
 - Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951))
+- Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 03ea07623dbaf..ad12ec9003e64 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -48,7 +48,7 @@ dependencies {
   api 'com.azure:azure-json:1.3.0'
   api 'com.azure:azure-xml:1.1.0'
   api 'com.azure:azure-storage-common:12.28.0'
-  api 'com.azure:azure-core-http-netty:1.15.5'
+  api 'com.azure:azure-core-http-netty:1.15.7'
   api "io.netty:netty-codec-dns:${versions.netty}"
   api "io.netty:netty-codec-socks:${versions.netty}"
   api "io.netty:netty-codec-http2:${versions.netty}"
diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1
deleted file mode 100644
index 2f5239cc26148..0000000000000
--- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-44d99705d3759e2ad7ee8110f811d4ed304a6a7c
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1
new file mode 100644
index 0000000000000..d72f835c69903
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1
@@ -0,0 +1 @@
+a83247eeeb7f63f891e725228d54c3c24132c66a
\ No newline at end of file

From 0b365998ed6e4f537dbdf7983a077bc53e785bb9 Mon Sep 17 00:00:00 2001
From: Michael Froh <froh@amazon.com>
Date: Mon, 6 Jan 2025 16:23:59 -0800
Subject: [PATCH 26/61] Always use constant_score query for match_only_text
 (#16964)

In some cases, when we create a term query over a `match_only_text`
field, it may still try to compute scores, which prevents early
termination. We should *always* use a constant score query when
querying `match_only_text`, since we don't have the statistics
required to compute scores.
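
The gist of the change, as a minimal Lucene sketch (the class and method
names here are illustrative only, not the actual mapper internals; the real
overrides in MatchOnlyTextFieldMapper below wrap both termQuery and
termQueryCaseInsensitive the same way):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class ConstantScoreTermQuerySketch {
        // Wrap the term query so every hit gets a constant score and the
        // collector can terminate early instead of computing real scores.
        public static Query termQuery(String field, String value) {
            return new ConstantScoreQuery(new TermQuery(new Term(field, value)));
        }
    }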

---------

Signed-off-by: Michael Froh <froh@amazon.com>
---
 CHANGELOG.md                                  |  1 +
 .../mapper/MatchOnlyTextFieldMapper.java      | 11 +++++++++
 .../mapper/MatchOnlyTextFieldMapperTests.java | 23 ++++++++++++++++++-
 .../mapper/MatchOnlyTextFieldTypeTests.java   | 18 +++++++++++++++
 4 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bcf1904db8d27..1b49368a20fa8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -87,6 +87,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763))
 - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606))
 - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335))
+- Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
 
 ### Security
 
diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java
index fb97f8c309a70..757de65248d33 100644
--- a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java
@@ -16,6 +16,7 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
@@ -290,6 +291,16 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions,
             return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context);
         }
 
+        @Override
+        public Query termQuery(Object value, QueryShardContext context) {
+            return new ConstantScoreQuery(super.termQuery(value, context));
+        }
+
+        @Override
+        public Query termQueryCaseInsensitive(Object value, QueryShardContext context) {
+            return new ConstantScoreQuery(super.termQueryCaseInsensitive(value, context));
+        }
+
         private List<List<Term>> getTermsFromTokenStream(TokenStream stream) throws IOException {
             final List<List<Term>> termArray = new ArrayList<>();
             TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java
index 580f8cccc9af5..d9f0fd6657085 100644
--- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java
@@ -15,11 +15,13 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
+import org.opensearch.common.lucene.search.AutomatonQueries;
 import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
@@ -28,6 +30,7 @@
 import org.opensearch.index.query.MatchPhraseQueryBuilder;
 import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.query.SourceFieldMatchQuery;
+import org.opensearch.index.query.TermQueryBuilder;
 import org.opensearch.index.search.MatchQuery;
 import org.junit.Before;
 
@@ -391,7 +394,7 @@ public void testPhraseQuery() throws IOException {
 
         assertThat(q, is(expectedQuery));
         Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext);
-        assertThat(q4, is(new TermQuery(new Term("field", "singleton"))));
+        assertThat(q4, is(new ConstantScoreQuery(new TermQuery(new Term("field", "singleton")))));
 
         Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext);
         expectedQuery = new SourceFieldMatchQuery(
@@ -447,4 +450,22 @@ public void testPhraseQuery() throws IOException {
         );
         assertThat(q6, is(expectedQuery));
     }
+
+    public void testTermQuery() throws Exception {
+        MapperService mapperService = createMapperService(mapping(b -> {
+            b.startObject("field");
+            {
+                b.field("type", textFieldName);
+                b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer
+            }
+            b.endObject();
+        }));
+        QueryShardContext queryShardContext = createQueryShardContext(mapperService);
+
+        Query q = new TermQueryBuilder("field", "foo").rewrite(queryShardContext).toQuery(queryShardContext);
+        assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), q);
+
+        q = new TermQueryBuilder("field", "foo").caseInsensitive(true).rewrite(queryShardContext).toQuery(queryShardContext);
+        assertEquals(new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "foo"))), q);
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java
index 51234fa04ddc2..0170cdde8b21c 100644
--- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java
@@ -8,7 +8,11 @@
 
 package org.opensearch.index.mapper;
 
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.TermQuery;
 import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.lucene.search.AutomatonQueries;
 
 public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests {
 
@@ -28,4 +32,18 @@ TextFieldMapper.TextFieldType createFieldType(boolean searchable) {
             ParametrizedFieldMapper.Parameter.metaParam().get()
         );
     }
+
+    @Override
+    public void testTermQuery() {
+        MappedFieldType ft = createFieldType(true);
+        assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), ft.termQuery("foo", null));
+        assertEquals(
+            new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo"))),
+            ft.termQueryCaseInsensitive("fOo", null)
+        );
+
+        MappedFieldType unsearchable = createFieldType(false);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null));
+        assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
+    }
 }

From e7e19f712596ca0ca0531ff5c39663cc472fc95f Mon Sep 17 00:00:00 2001
From: expani1729 <110471048+expani@users.noreply.github.com>
Date: Mon, 6 Jan 2025 16:53:05 -0800
Subject: [PATCH 27/61] Changes to support unmapped fields in metric
 aggregation (#16481)

Avoids an exception when querying an unmapped field while the star tree
experimental feature is enabled.
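
In essence, the fix is a null guard where the aggregator factory resolves its
field. A minimal sketch of the guarded check (class and parameter names are
illustrative; the real logic lives in ValuesSourceAggregatorFactory#getField
and StarTreeQueryHelper, using MetricStat rather than plain strings):

    import java.util.Map;
    import java.util.Set;

    public class UnmappedFieldGuardSketch {
        // getField() now returns null when the aggregation targets an unmapped field;
        // the star-tree support check treats that as "not supported" rather than throwing.
        public static boolean supportedByStarTree(String field, Map<String, Set<String>> supportedMetrics, String metricStat) {
            return field != null
                && supportedMetrics.containsKey(field)
                && supportedMetrics.get(field).contains(metricStat);
        }
    }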

---------

Signed-off-by: expani <anijainc@amazon.com>
---
 .../startree/utils/StarTreeQueryHelper.java   |   2 +-
 .../ValuesSourceAggregatorFactory.java        |   2 +-
 .../startree/MetricAggregatorTests.java       | 139 ++++++++++++++++++
 .../startree/StarTreeFilterTests.java         |  13 +-
 .../aggregations/AggregatorTestCase.java      |  22 ++-
 5 files changed, 172 insertions(+), 6 deletions(-)

diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java
index e538be5d5bece..e46cf6f56b36e 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java
@@ -152,7 +152,7 @@ private static MetricStat validateStarTreeMetricSupport(
             MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat();
             field = ((MetricAggregatorFactory) aggregatorFactory).getField();
 
-            if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) {
+            if (field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) {
                 return metricStat;
             }
         }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java
index d862b2c2784de..41344fd06cbbc 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java
@@ -104,6 +104,6 @@ public String getStatsSubtype() {
     }
 
     public String getField() {
-        return config.fieldContext().field();
+        return config.fieldContext() != null ? config.fieldContext().field() : null;
     }
 }
diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java
index 12e83cbbadd5d..05f48eb9243af 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java
@@ -28,18 +28,27 @@
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.common.util.MockBigArrays;
+import org.opensearch.common.util.MockPageCacheRecycler;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.index.codec.composite.CompositeIndexFieldInfo;
 import org.opensearch.index.codec.composite.CompositeIndexReader;
 import org.opensearch.index.codec.composite.composite912.Composite912Codec;
 import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests;
 import org.opensearch.index.compositeindex.datacube.Dimension;
+import org.opensearch.index.compositeindex.datacube.Metric;
+import org.opensearch.index.compositeindex.datacube.MetricStat;
 import org.opensearch.index.compositeindex.datacube.NumericDimension;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.mapper.NumberFieldMapper;
 import org.opensearch.index.query.QueryBuilder;
+import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.query.TermQueryBuilder;
 import org.opensearch.search.aggregations.AggregationBuilder;
+import org.opensearch.search.aggregations.AggregatorFactories;
+import org.opensearch.search.aggregations.AggregatorFactory;
 import org.opensearch.search.aggregations.AggregatorTestCase;
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder;
@@ -49,14 +58,17 @@
 import org.opensearch.search.aggregations.metrics.InternalSum;
 import org.opensearch.search.aggregations.metrics.InternalValueCount;
 import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory;
 import org.opensearch.search.aggregations.metrics.MinAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.SumAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder;
+import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.junit.After;
 import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
@@ -69,6 +81,8 @@
 import static org.opensearch.search.aggregations.AggregationBuilders.min;
 import static org.opensearch.search.aggregations.AggregationBuilders.sum;
 import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class MetricAggregatorTests extends AggregatorTestCase {
 
@@ -267,6 +281,110 @@ public void testStarTreeDocValues() throws IOException {
             );
         }
 
+        CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
+
+        QueryShardContext queryShardContext = queryShardContextMock(
+            indexSearcher,
+            mapperServiceMock(),
+            createIndexSettings(),
+            circuitBreakerService,
+            new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking()
+        );
+
+        MetricAggregatorFactory aggregatorFactory = mock(MetricAggregatorFactory.class);
+        when(aggregatorFactory.getSubFactories()).thenReturn(AggregatorFactories.EMPTY);
+        when(aggregatorFactory.getField()).thenReturn(FIELD_NAME);
+        when(aggregatorFactory.getMetricStat()).thenReturn(MetricStat.SUM);
+
+        // Case when field and metric type in aggregation are fully supported by star tree.
+        testCase(
+            indexSearcher,
+            query,
+            queryBuilder,
+            sumAggregationBuilder,
+            starTree,
+            supportedDimensions,
+            List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))),
+            verifyAggregation(InternalSum::getValue),
+            aggregatorFactory,
+            true
+        );
+
+        // Case when the field is not supported by star tree
+        SumAggregationBuilder invalidFieldSumAggBuilder = sum("_name").field("hello");
+        testCase(
+            indexSearcher,
+            query,
+            queryBuilder,
+            invalidFieldSumAggBuilder,
+            starTree,
+            supportedDimensions,
+            Collections.emptyList(),
+            verifyAggregation(InternalSum::getValue),
+            invalidFieldSumAggBuilder.build(queryShardContext, null),
+            false // Invalid fields will return null StarTreeQueryContext which will not cause early termination by leaf collector
+        );
+
+        // Case when metric type in aggregation is not supported by star tree but the field is supported.
+        testCase(
+            indexSearcher,
+            query,
+            queryBuilder,
+            sumAggregationBuilder,
+            starTree,
+            supportedDimensions,
+            List.of(new Metric(FIELD_NAME, List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))),
+            verifyAggregation(InternalSum::getValue),
+            aggregatorFactory,
+            false
+        );
+
+        // Case when field is not present in supported metrics
+        testCase(
+            indexSearcher,
+            query,
+            queryBuilder,
+            sumAggregationBuilder,
+            starTree,
+            supportedDimensions,
+            List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))),
+            verifyAggregation(InternalSum::getValue),
+            aggregatorFactory,
+            false
+        );
+
+        AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class);
+        when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { mock(MetricAggregatorFactory.class) });
+        when(aggregatorFactory.getSubFactories()).thenReturn(aggregatorFactories);
+
+        // Case when sub aggregations are present
+        testCase(
+            indexSearcher,
+            query,
+            queryBuilder,
+            sumAggregationBuilder,
+            starTree,
+            supportedDimensions,
+            List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))),
+            verifyAggregation(InternalSum::getValue),
+            aggregatorFactory,
+            false
+        );
+
+        // Case when aggregation factory is not metric aggregation
+        testCase(
+            indexSearcher,
+            query,
+            queryBuilder,
+            sumAggregationBuilder,
+            starTree,
+            supportedDimensions,
+            List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))),
+            verifyAggregation(InternalSum::getValue),
+            mock(ValuesSourceAggregatorFactory.class),
+            false
+        );
+
         ir.close();
         directory.close();
     }
@@ -287,6 +405,21 @@ private <T extends AggregationBuilder, V extends InternalAggregation> void testC
         CompositeIndexFieldInfo starTree,
         List<Dimension> supportedDimensions,
         BiConsumer<V, V> verify
+    ) throws IOException {
+        testCase(searcher, query, queryBuilder, aggBuilder, starTree, supportedDimensions, Collections.emptyList(), verify, null, true);
+    }
+
+    private <T extends AggregationBuilder, V extends InternalAggregation> void testCase(
+        IndexSearcher searcher,
+        Query query,
+        QueryBuilder queryBuilder,
+        T aggBuilder,
+        CompositeIndexFieldInfo starTree,
+        List<Dimension> supportedDimensions,
+        List<Metric> supportedMetrics,
+        BiConsumer<V, V> verify,
+        AggregatorFactory aggregatorFactory,
+        boolean assertCollectorEarlyTermination
     ) throws IOException {
         V starTreeAggregation = searchAndReduceStarTree(
             createIndexSettings(),
@@ -296,8 +429,11 @@ private <T extends AggregationBuilder, V extends InternalAggregation> void testC
             aggBuilder,
             starTree,
             supportedDimensions,
+            supportedMetrics,
             DEFAULT_MAX_BUCKETS,
             false,
+            aggregatorFactory,
+            assertCollectorEarlyTermination,
             DEFAULT_MAPPED_FIELD
         );
         V expectedAggregation = searchAndReduceStarTree(
@@ -308,8 +444,11 @@ private <T extends AggregationBuilder, V extends InternalAggregation> void testC
             aggBuilder,
             null,
             null,
+            null,
             DEFAULT_MAX_BUCKETS,
             false,
+            aggregatorFactory,
+            assertCollectorEarlyTermination,
             DEFAULT_MAPPED_FIELD
         );
         verify.accept(expectedAggregation, starTreeAggregation);
diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java
index b03cb5ac7bb9d..c1cb19b9576e4 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java
@@ -87,7 +87,8 @@ public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOExcepti
         testStarTreeFilter(10, false);
     }
 
-    private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException {
+    private Directory createStarTreeIndex(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension, List<Document> docs)
+        throws IOException {
         Directory directory = newDirectory();
         IndexWriterConfig conf = newIndexWriterConfig(null);
         conf.setCodec(getCodec(maxLeafDoc, skipStarNodeCreationForSDVDimension));
@@ -95,7 +96,6 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS
         RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
         int totalDocs = 100;
 
-        List<Document> docs = new ArrayList<>();
         for (int i = 0; i < totalDocs; i++) {
             Document doc = new Document();
             doc.add(new SortedNumericDocValuesField(SNDV, i));
@@ -110,6 +110,15 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS
         }
         iw.forceMerge(1);
         iw.close();
+        return directory;
+    }
+
+    private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException {
+        List<Document> docs = new ArrayList<>();
+
+        Directory directory = createStarTreeIndex(maxLeafDoc, skipStarNodeCreationForSDVDimension, docs);
+
+        int totalDocs = docs.size();
 
         DirectoryReader ir = DirectoryReader.open(directory);
         initValuesSourceRegistry();
diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
index e1728c4476699..27142b298db52 100644
--- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
@@ -93,6 +93,7 @@
 import org.opensearch.index.cache.query.DisabledQueryCache;
 import org.opensearch.index.codec.composite.CompositeIndexFieldInfo;
 import org.opensearch.index.compositeindex.datacube.Dimension;
+import org.opensearch.index.compositeindex.datacube.Metric;
 import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper;
 import org.opensearch.index.fielddata.IndexFieldData;
 import org.opensearch.index.fielddata.IndexFieldDataCache;
@@ -348,7 +349,9 @@ protected CountingAggregator createCountingAggregator(
         IndexSettings indexSettings,
         CompositeIndexFieldInfo starTree,
         List<Dimension> supportedDimensions,
+        List<Metric> supportedMetrics,
         MultiBucketConsumer bucketConsumer,
+        AggregatorFactory aggregatorFactory,
         MappedFieldType... fieldTypes
     ) throws IOException {
         SearchContext searchContext;
@@ -360,7 +363,9 @@ protected CountingAggregator createCountingAggregator(
                 queryBuilder,
                 starTree,
                 supportedDimensions,
+                supportedMetrics,
                 bucketConsumer,
+                aggregatorFactory,
                 fieldTypes
             );
         } else {
@@ -389,7 +394,9 @@ protected SearchContext createSearchContextWithStarTreeContext(
         QueryBuilder queryBuilder,
         CompositeIndexFieldInfo starTree,
         List<Dimension> supportedDimensions,
+        List<Metric> supportedMetrics,
         MultiBucketConsumer bucketConsumer,
+        AggregatorFactory aggregatorFactory,
         MappedFieldType... fieldTypes
     ) throws IOException {
         SearchContext searchContext = createSearchContext(
@@ -406,7 +413,12 @@ protected SearchContext createSearchContextWithStarTreeContext(
         AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class);
         when(searchContext.aggregations()).thenReturn(searchContextAggregations);
         when(searchContextAggregations.factories()).thenReturn(aggregatorFactories);
-        when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {});
+
+        if (aggregatorFactory != null) {
+            when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { aggregatorFactory });
+        } else {
+            when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {});
+        }
 
         CompositeDataCubeFieldType compositeMappedFieldType = mock(CompositeDataCubeFieldType.class);
         when(compositeMappedFieldType.name()).thenReturn(starTree.getField());
@@ -414,6 +426,7 @@ protected SearchContext createSearchContextWithStarTreeContext(
         Set<CompositeMappedFieldType> compositeFieldTypes = Set.of(compositeMappedFieldType);
 
         when((compositeMappedFieldType).getDimensions()).thenReturn(supportedDimensions);
+        when((compositeMappedFieldType).getMetrics()).thenReturn(supportedMetrics);
         MapperService mapperService = mock(MapperService.class);
         when(mapperService.getCompositeFieldTypes()).thenReturn(compositeFieldTypes);
         when(searchContext.mapperService()).thenReturn(mapperService);
@@ -740,8 +753,11 @@ protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduc
         AggregationBuilder builder,
         CompositeIndexFieldInfo compositeIndexFieldInfo,
         List<Dimension> supportedDimensions,
+        List<Metric> supportedMetrics,
         int maxBucket,
         boolean hasNested,
+        AggregatorFactory aggregatorFactory,
+        boolean assertCollectorEarlyTermination,
         MappedFieldType... fieldTypes
     ) throws IOException {
         query = query.rewrite(searcher);
@@ -764,7 +780,9 @@ protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduc
             indexSettings,
             compositeIndexFieldInfo,
             supportedDimensions,
+            supportedMetrics,
             bucketConsumer,
+            aggregatorFactory,
             fieldTypes
         );
 
@@ -772,7 +790,7 @@ protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduc
         searcher.search(query, countingAggregator);
         countingAggregator.postCollection();
         aggs.add(countingAggregator.buildTopLevel());
-        if (compositeIndexFieldInfo != null) {
+        if (compositeIndexFieldInfo != null && assertCollectorEarlyTermination) {
             assertEquals(0, countingAggregator.collectCounter.get());
         }
 

From 1d4b85f5ff8c4e314ecf49190b68eb995bf571d8 Mon Sep 17 00:00:00 2001
From: Ashish Singh <ssashish@amazon.com>
Date: Thu, 9 Jan 2025 10:22:24 +0530
Subject: [PATCH 28/61] Use async client for delete blob or path in S3 Blob
 Container (#16788)

* Use async client for delete blob or path in S3 Blob Container

Signed-off-by: Ashish Singh <ssashish@amazon.com>

* Fix UTs

Signed-off-by: Ashish Singh <ssashish@amazon.com>

* Fix failures in S3BlobStoreRepositoryTests

Signed-off-by: Ashish Singh <ssashish@amazon.com>

* Fix S3BlobStoreRepositoryTests

Signed-off-by: Ashish Singh <ssashish@amazon.com>

* Fix failures in S3RepositoryThirdPartyTests

Signed-off-by: Ashish Singh <ssashish@amazon.com>

* Fix failures in S3RepositoryPluginTests

Signed-off-by: Ashish Singh <ssashish@amazon.com>

---------

Signed-off-by: Ashish Singh <ssashish@amazon.com>
---
 .../s3/S3BlobStoreRepositoryTests.java        |  27 +-
 .../s3/S3RepositoryThirdPartyTests.java       |   8 -
 .../repositories/s3/S3AsyncService.java       |  30 +-
 .../repositories/s3/S3BlobContainer.java      | 140 +------
 .../repositories/s3/S3RepositoryPlugin.java   |  19 +-
 .../s3/S3BlobStoreContainerTests.java         | 347 +++++++++++++-----
 .../s3/S3RepositoryPluginTests.java           |  11 +-
 .../common/settings/ClusterSettings.java      |   1 -
 .../blobstore/BlobStoreRepository.java        |  38 +-
 9 files changed, 322 insertions(+), 299 deletions(-)

diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java
index 944de326d144c..5bea51706cfae 100644
--- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -59,6 +59,7 @@
 import org.opensearch.repositories.RepositoryStats;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase;
+import org.opensearch.repositories.s3.async.AsyncTransferManager;
 import org.opensearch.repositories.s3.utils.AwsRequestSigner;
 import org.opensearch.snapshots.mockstore.BlobStoreWrapper;
 import org.opensearch.test.BackgroundIndexer;
@@ -153,7 +154,6 @@ protected Settings nodeSettings(int nodeOrdinal) {
             // Disable request throttling because some random values in tests might generate too many failures for the S3 client
             .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false)
             .put(S3ClientSettings.PROXY_TYPE_SETTING.getConcreteSettingForNamespace("test").getKey(), ProxySettings.ProxyType.DIRECT)
-            .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false)
             .put(super.nodeSettings(nodeOrdinal))
             .setSecureSettings(secureSettings);
 
@@ -253,22 +253,27 @@ protected S3Repository createRepository(
             ClusterService clusterService,
             RecoverySettings recoverySettings
         ) {
-            GenericStatsMetricPublisher genericStatsMetricPublisher = new GenericStatsMetricPublisher(10000L, 10, 10000L, 10);
-
+            AsyncTransferManager asyncUploadUtils = new AsyncTransferManager(
+                S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(),
+                normalExecutorBuilder.getStreamReader(),
+                priorityExecutorBuilder.getStreamReader(),
+                urgentExecutorBuilder.getStreamReader(),
+                transferSemaphoresHolder
+            );
             return new S3Repository(
                 metadata,
                 registry,
                 service,
                 clusterService,
                 recoverySettings,
-                null,
-                null,
-                null,
-                null,
-                null,
-                false,
-                null,
-                null,
+                asyncUploadUtils,
+                urgentExecutorBuilder,
+                priorityExecutorBuilder,
+                normalExecutorBuilder,
+                s3AsyncService,
+                S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()),
+                normalPrioritySizeBasedBlockingQ,
+                lowPrioritySizeBasedBlockingQ,
                 genericStatsMetricPublisher
             ) {
 
diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java
index f0e40db965646..7db9a0d3ba790 100644
--- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java
+++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java
@@ -55,14 +55,6 @@
 
 public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
 
-    @Override
-    protected Settings nodeSettings() {
-        return Settings.builder()
-            .put(super.nodeSettings())
-            .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false)
-            .build();
-    }
-
     @Override
     @Before
     @SuppressForbidden(reason = "Need to set system property here for AWS SDK v2")
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java
index 8bbef168de89c..7397c3132c17c 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java
@@ -25,7 +25,6 @@
 import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.http.nio.netty.ProxyConfiguration;
 import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup;
-import software.amazon.awssdk.profiles.ProfileFileSystemSetting;
 import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.s3.S3AsyncClient;
 import software.amazon.awssdk.services.s3.S3AsyncClientBuilder;
@@ -120,6 +119,7 @@ public AmazonAsyncS3Reference client(
             if (existing != null && existing.tryIncRef()) {
                 return existing;
             }
+
             final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference(
                 buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder)
             );
@@ -235,17 +235,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient(
     }
 
     static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) {
+        RetryPolicy retryPolicy = SocketAccess.doPrivileged(
+            () -> RetryPolicy.builder()
+                .numRetries(clientSettings.maxRetries)
+                .throttlingBackoffStrategy(
+                    clientSettings.throttleRetries ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) : BackoffStrategy.none()
+                )
+                .build()
+        );
+
         return ClientOverrideConfiguration.builder()
-            .retryPolicy(
-                RetryPolicy.builder()
-                    .numRetries(clientSettings.maxRetries)
-                    .throttlingBackoffStrategy(
-                        clientSettings.throttleRetries
-                            ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD)
-                            : BackoffStrategy.none()
-                    )
-                    .build()
-            )
+            .retryPolicy(retryPolicy)
             .apiCallAttemptTimeout(Duration.ofMillis(clientSettings.requestTimeoutMillis))
             .build();
     }
@@ -346,12 +346,7 @@ static AwsCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c
     // valid paths.
     @SuppressForbidden(reason = "Need to provide this override to v2 SDK so that path does not default to home path")
     private static void setDefaultAwsProfilePath() {
-        if (ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.getStringValue().isEmpty()) {
-            System.setProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), System.getProperty("opensearch.path.conf"));
-        }
-        if (ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue().isEmpty()) {
-            System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), System.getProperty("opensearch.path.conf"));
-        }
+        S3Service.setDefaultAwsProfilePath();
     }
 
     private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) {
@@ -443,5 +438,6 @@ public AwsCredentials resolveCredentials() {
     @Override
     public void close() {
         releaseCachedClients();
+
     }
 }
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index 1a402e8431e25..8690a5c91680a 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -43,9 +43,6 @@
 import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
 import software.amazon.awssdk.services.s3.model.CompletedPart;
 import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
-import software.amazon.awssdk.services.s3.model.Delete;
-import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
-import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
 import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
 import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
 import software.amazon.awssdk.services.s3.model.GetObjectRequest;
@@ -55,9 +52,7 @@
 import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
 import software.amazon.awssdk.services.s3.model.NoSuchKeyException;
 import software.amazon.awssdk.services.s3.model.ObjectAttributes;
-import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
 import software.amazon.awssdk.services.s3.model.PutObjectRequest;
-import software.amazon.awssdk.services.s3.model.S3Error;
 import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
 import software.amazon.awssdk.services.s3.model.UploadPartRequest;
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;
@@ -68,7 +63,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.opensearch.ExceptionsHelper;
+import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.StreamContext;
@@ -101,11 +96,8 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
@@ -381,125 +373,17 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS
     }
 
     @Override
-    public DeleteResult delete() throws IOException {
-        final AtomicLong deletedBlobs = new AtomicLong();
-        final AtomicLong deletedBytes = new AtomicLong();
-        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
-            ListObjectsV2Iterable listObjectsIterable = SocketAccess.doPrivileged(
-                () -> clientReference.get()
-                    .listObjectsV2Paginator(
-                        ListObjectsV2Request.builder()
-                            .bucket(blobStore.bucket())
-                            .prefix(keyPath)
-                            .overrideConfiguration(
-                                o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher)
-                            )
-                            .build()
-                    )
-            );
-
-            Iterator<ListObjectsV2Response> listObjectsResponseIterator = listObjectsIterable.iterator();
-            while (listObjectsResponseIterator.hasNext()) {
-                ListObjectsV2Response listObjectsResponse = SocketAccess.doPrivileged(listObjectsResponseIterator::next);
-                List<String> blobsToDelete = listObjectsResponse.contents().stream().map(s3Object -> {
-                    deletedBlobs.incrementAndGet();
-                    deletedBytes.addAndGet(s3Object.size());
-
-                    return s3Object.key();
-                }).collect(Collectors.toList());
-
-                if (!listObjectsResponseIterator.hasNext()) {
-                    blobsToDelete.add(keyPath);
-                }
-
-                doDeleteBlobs(blobsToDelete, false);
-            }
-        } catch (SdkException e) {
-            throw new IOException("Exception when deleting blob container [" + keyPath + "]", e);
-        }
-
-        return new DeleteResult(deletedBlobs.get(), deletedBytes.get());
+    public DeleteResult delete() {
+        PlainActionFuture<DeleteResult> future = new PlainActionFuture<>();
+        deleteAsync(future);
+        return future.actionGet();
     }
 
     @Override
-    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
-        doDeleteBlobs(blobNames, true);
-    }
-
-    private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOException {
-        if (blobNames.isEmpty()) {
-            return;
-        }
-        final Set<String> outstanding;
-        if (relative) {
-            outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet());
-        } else {
-            outstanding = new HashSet<>(blobNames);
-        }
-        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
-            // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes
-            final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
-            final List<String> partition = new ArrayList<>();
-            for (String key : outstanding) {
-                partition.add(key);
-                if (partition.size() == blobStore.getBulkDeletesSize()) {
-                    deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
-                    partition.clear();
-                }
-            }
-            if (partition.isEmpty() == false) {
-                deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
-            }
-            SocketAccess.doPrivilegedVoid(() -> {
-                SdkException aex = null;
-                for (DeleteObjectsRequest deleteRequest : deleteRequests) {
-                    List<String> keysInRequest = deleteRequest.delete()
-                        .objects()
-                        .stream()
-                        .map(ObjectIdentifier::key)
-                        .collect(Collectors.toList());
-                    try {
-                        DeleteObjectsResponse deleteObjectsResponse = clientReference.get().deleteObjects(deleteRequest);
-                        outstanding.removeAll(keysInRequest);
-                        outstanding.addAll(deleteObjectsResponse.errors().stream().map(S3Error::key).collect(Collectors.toSet()));
-                        if (!deleteObjectsResponse.errors().isEmpty()) {
-                            logger.warn(
-                                () -> new ParameterizedMessage(
-                                    "Failed to delete some blobs {}",
-                                    deleteObjectsResponse.errors()
-                                        .stream()
-                                        .map(s3Error -> "[" + s3Error.key() + "][" + s3Error.code() + "][" + s3Error.message() + "]")
-                                        .collect(Collectors.toList())
-                                )
-                            );
-                        }
-                    } catch (SdkException e) {
-                        // The AWS client threw any unexpected exception and did not execute the request at all so we do not
-                        // remove any keys from the outstanding deletes set.
-                        aex = ExceptionsHelper.useOrSuppress(aex, e);
-                    }
-                }
-                if (aex != null) {
-                    throw aex;
-                }
-            });
-        } catch (Exception e) {
-            throw new IOException("Failed to delete blobs [" + outstanding + "]", e);
-        }
-        assert outstanding.isEmpty();
-    }
-
-    private DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) {
-        return DeleteObjectsRequest.builder()
-            .bucket(bucket)
-            .delete(
-                Delete.builder()
-                    .objects(blobs.stream().map(blob -> ObjectIdentifier.builder().key(blob).build()).collect(Collectors.toList()))
-                    .quiet(true)
-                    .build()
-            )
-            .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher))
-            .build();
+    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) {
+        PlainActionFuture<Void> future = new PlainActionFuture<>();
+        deleteBlobsAsyncIgnoringIfNotExists(blobNames, future);
+        future.actionGet();
     }
 
     @Override
@@ -886,7 +770,11 @@ public void deleteAsync(ActionListener<DeleteResult> completionListener) {
         try (AmazonAsyncS3Reference asyncClientReference = blobStore.asyncClientReference()) {
             S3AsyncClient s3AsyncClient = asyncClientReference.get().client();
 
-            ListObjectsV2Request listRequest = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath).build();
+            ListObjectsV2Request listRequest = ListObjectsV2Request.builder()
+                .bucket(blobStore.bucket())
+                .prefix(keyPath)
+                .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher))
+                .build();
             ListObjectsV2Publisher listPublisher = s3AsyncClient.listObjectsV2Paginator(listRequest);
 
             AtomicLong deletedBlobs = new AtomicLong();
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
index 1048ec784ec4e..72a812339e387 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
@@ -93,19 +93,19 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo
     private static final String NORMAL_TRANSFER_QUEUE_CONSUMER = "normal_transfer_queue_consumer";
 
     protected final S3Service service;
-    private final S3AsyncService s3AsyncService;
+    protected final S3AsyncService s3AsyncService;
 
     private final Path configPath;
 
-    private AsyncExecutorContainer urgentExecutorBuilder;
-    private AsyncExecutorContainer priorityExecutorBuilder;
-    private AsyncExecutorContainer normalExecutorBuilder;
+    protected AsyncExecutorContainer urgentExecutorBuilder;
+    protected AsyncExecutorContainer priorityExecutorBuilder;
+    protected AsyncExecutorContainer normalExecutorBuilder;
     private ExecutorService lowTransferQConsumerService;
     private ExecutorService normalTransferQConsumerService;
-    private SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ;
-    private SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ;
-    private TransferSemaphoresHolder transferSemaphoresHolder;
-    private GenericStatsMetricPublisher genericStatsMetricPublisher;
+    protected SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ;
+    protected SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ;
+    protected TransferSemaphoresHolder transferSemaphoresHolder;
+    protected GenericStatsMetricPublisher genericStatsMetricPublisher;
 
     public S3RepositoryPlugin(final Settings settings, final Path configPath) {
         this(settings, configPath, new S3Service(configPath), new S3AsyncService(configPath));
@@ -387,5 +387,8 @@ public void reload(Settings settings) {
     public void close() throws IOException {
         service.close();
         s3AsyncService.close();
+        urgentExecutorBuilder.getAsyncTransferEventLoopGroup().close();
+        priorityExecutorBuilder.getAsyncTransferEventLoopGroup().close();
+        normalExecutorBuilder.getAsyncTransferEventLoopGroup().close();
     }
 }
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
index 2cb11541d924f..53371cd1529ce 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
@@ -48,6 +48,7 @@
 import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
 import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
 import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
+import software.amazon.awssdk.services.s3.model.DeletedObject;
 import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts;
 import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
 import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
@@ -92,7 +93,6 @@
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -102,6 +102,7 @@
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
@@ -286,9 +287,8 @@ public int numberOfPagesFetched() {
         }
     }
 
-    public void testDelete() throws IOException {
+    public void testDelete() throws Exception {
         final String bucketName = randomAlphaOfLengthBetween(1, 10);
-
         final BlobPath blobPath = new BlobPath();
         int bulkDeleteSize = 5;
 
@@ -297,147 +297,314 @@ public void testDelete() throws IOException {
         when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
         when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize);
 
-        final S3Client client = mock(S3Client.class);
-        doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference();
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create(
+            s3AsyncClient,
+            s3AsyncClient,
+            s3AsyncClient,
+            null
+        );
+        when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials);
 
-        ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class);
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
         final int totalPageCount = 3;
         final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5);
         final int s3ObjectsPerPage = 5;
-        MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator(
-            totalPageCount,
-            s3ObjectsPerPage,
-            s3ObjectSize
-        );
-        when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator);
-        when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable);
 
-        final List<String> keysDeleted = new ArrayList<>();
-        AtomicInteger deleteCount = new AtomicInteger();
+        List<ListObjectsV2Response> responses = new ArrayList<>();
+        List<S3Object> allObjects = new ArrayList<>();
+        long totalSize = 0;
+
+        for (int i = 0; i < totalPageCount; i++) {
+            List<S3Object> pageObjects = new ArrayList<>();
+            for (int j = 0; j < s3ObjectsPerPage; j++) {
+                pageObjects.add(S3Object.builder().key(randomAlphaOfLength(10)).size(s3ObjectSize).build());
+                totalSize += s3ObjectSize;
+            }
+            allObjects.addAll(pageObjects);
+            responses.add(ListObjectsV2Response.builder().contents(pageObjects).build());
+        }
+
+        AtomicInteger counter = new AtomicInteger();
         doAnswer(invocation -> {
-            DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0);
-            deleteCount.getAndIncrement();
-            logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size());
-            keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList()));
-            return DeleteObjectsResponse.builder().build();
-        }).when(client).deleteObjects(any(DeleteObjectsRequest.class));
+            Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0);
+            subscriber.onSubscribe(new Subscription() {
+                @Override
+                public void request(long n) {
+                    int currentCounter = counter.getAndIncrement();
+                    if (currentCounter < responses.size()) {
+                        subscriber.onNext(responses.get(currentCounter));
+                    }
+                    if (currentCounter == responses.size() - 1) {
+                        subscriber.onComplete();
+                    }
+                }
+
+                @Override
+                public void cancel() {}
+            });
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(
+            CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build())
+        );
 
         final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
 
-        DeleteResult deleteResult = blobContainer.delete();
-        assertEquals(s3ObjectSize * s3ObjectsPerPage * totalPageCount, deleteResult.bytesDeleted());
-        assertEquals(s3ObjectsPerPage * totalPageCount, deleteResult.blobsDeleted());
-        // keysDeleted will have blobPath also
-        assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1);
-        assertTrue(keysDeleted.contains(blobPath.buildAsString()));
-        // keysDeleted will have blobPath also
-        assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get());
-        keysDeleted.remove(blobPath.buildAsString());
-        assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted));
+        CountDownLatch latch = new CountDownLatch(1);
+        AtomicReference<DeleteResult> resultRef = new AtomicReference<>();
+
+        blobContainer.deleteAsync(new ActionListener<>() {
+            @Override
+            public void onResponse(DeleteResult deleteResult) {
+                resultRef.set(deleteResult);
+                latch.countDown();
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                fail("Unexpected failure: " + e.getMessage());
+            }
+        });
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+        DeleteResult result = resultRef.get();
+
+        assertEquals(totalSize, result.bytesDeleted());
+        assertEquals(allObjects.size(), result.blobsDeleted());
+
+        verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class));
+        int expectedDeleteCalls = (int) Math.ceil((double) allObjects.size() / bulkDeleteSize);
+        verify(s3AsyncClient, times(expectedDeleteCalls)).deleteObjects(any(DeleteObjectsRequest.class));
     }
 
-    public void testDeleteItemLevelErrorsDuringDelete() {
+    public void testDeleteItemLevelErrorsDuringDelete() throws Exception {
         final String bucketName = randomAlphaOfLengthBetween(1, 10);
-
         final BlobPath blobPath = new BlobPath();
 
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         when(blobStore.bucket()).thenReturn(bucketName);
         when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        int bulkDeleteSize = 3; // Small size to force multiple delete requests
+        when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize);
 
-        final S3Client client = mock(S3Client.class);
-        doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference();
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
 
-        ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class);
-        final int totalPageCount = 3;
-        final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5);
-        final int s3ObjectsPerPage = 5;
-        MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator(
-            totalPageCount,
-            s3ObjectsPerPage,
-            s3ObjectSize
-        );
-        when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator);
-        when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable);
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        final int totalObjects = 10;
+        List<S3Object> s3Objects = new ArrayList<>();
+        for (int i = 0; i < totalObjects; i++) {
+            s3Objects.add(S3Object.builder().key("key-" + i).size(100L).build());
+        }
 
-        final List<String> keysFailedDeletion = new ArrayList<>();
+        AtomicBoolean onNext = new AtomicBoolean(false);
         doAnswer(invocation -> {
-            DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0);
-            int i = 0;
-            for (ObjectIdentifier objectIdentifier : deleteObjectsRequest.delete().objects()) {
+            Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0);
+            subscriber.onSubscribe(new Subscription() {
+                @Override
+                public void request(long n) {
+                    if (onNext.compareAndSet(false, true)) {
+                        subscriber.onNext(ListObjectsV2Response.builder().contents(s3Objects).build());
+                    } else {
+                        subscriber.onComplete();
+                    }
+                }
+
+                @Override
+                public void cancel() {}
+            });
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        // Simulate item-level errors during delete
+        AtomicInteger deleteCallCount = new AtomicInteger(0);
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenAnswer(invocation -> {
+            DeleteObjectsRequest request = invocation.getArgument(0);
+            List<S3Error> errors = new ArrayList<>();
+            List<DeletedObject> deletedObjects = new ArrayList<>();
+
+            for (int i = 0; i < request.delete().objects().size(); i++) {
                 if (i % 2 == 0) {
-                    keysFailedDeletion.add(objectIdentifier.key());
+                    errors.add(
+                        S3Error.builder()
+                            .key(request.delete().objects().get(i).key())
+                            .code("InternalError")
+                            .message("Simulated error")
+                            .build()
+                    );
+                } else {
+                    deletedObjects.add(DeletedObject.builder().key(request.delete().objects().get(i).key()).build());
                 }
-                i++;
             }
-            return DeleteObjectsResponse.builder()
-                .errors(keysFailedDeletion.stream().map(key -> S3Error.builder().key(key).build()).collect(Collectors.toList()))
-                .build();
-        }).when(client).deleteObjects(any(DeleteObjectsRequest.class));
+
+            deleteCallCount.incrementAndGet();
+            return CompletableFuture.completedFuture(DeleteObjectsResponse.builder().errors(errors).deleted(deletedObjects).build());
+        });
 
         final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
 
-        assertThrows(AssertionError.class, blobContainer::delete);
+        CountDownLatch latch = new CountDownLatch(1);
+        AtomicReference<DeleteResult> resultRef = new AtomicReference<>();
+        AtomicReference<Exception> exceptionRef = new AtomicReference<>();
+
+        blobContainer.deleteAsync(new ActionListener<>() {
+            @Override
+            public void onResponse(DeleteResult deleteResult) {
+                resultRef.set(deleteResult);
+                latch.countDown();
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                exceptionRef.set(e);
+                latch.countDown();
+            }
+        });
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+
+        assertNull("Unexpected exception: " + exceptionRef.get(), exceptionRef.get());
+        DeleteResult result = resultRef.get();
+        assertNotNull("Expected DeleteResult but got null", result);
+
+        // We expect half of the objects to be deleted successfully
+        // But as of today, the blob delete count and bytes are updated a bit earlier.
+        assertEquals(totalObjects, result.blobsDeleted());
+        assertEquals(totalObjects * 100L, result.bytesDeleted());
+
+        verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class));
+
+        // Calculate expected number of deleteObjects calls
+        int expectedDeleteCalls = (int) Math.ceil((double) totalObjects / bulkDeleteSize);
+        assertEquals(expectedDeleteCalls, deleteCallCount.get());
     }
 
-    public void testDeleteSdkExceptionDuringListOperation() {
+    public void testDeleteSdkExceptionDuringListOperation() throws Exception {
         final String bucketName = randomAlphaOfLengthBetween(1, 10);
-
         final BlobPath blobPath = new BlobPath();
 
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         when(blobStore.bucket()).thenReturn(bucketName);
         when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
 
-        final S3Client client = mock(S3Client.class);
-        doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference();
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
 
-        ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class);
-        final int totalPageCount = 3;
-        final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5);
-        final int s3ObjectsPerPage = 5;
-        MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator(
-            totalPageCount,
-            s3ObjectsPerPage,
-            s3ObjectSize
-        );
-        when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator);
-        when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable);
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        doAnswer(invocation -> {
+            Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0);
+            subscriber.onSubscribe(new Subscription() {
+                @Override
+                public void request(long n) {
+                    subscriber.onError(new RuntimeException("Simulated listing error"));
+                }
+
+                @Override
+                public void cancel() {}
+            });
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
 
         final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
 
-        assertThrows(IOException.class, blobContainer::delete);
+        CountDownLatch latch = new CountDownLatch(1);
+        AtomicReference<Exception> exceptionRef = new AtomicReference<>();
+
+        blobContainer.deleteAsync(new ActionListener<>() {
+            @Override
+            public void onResponse(DeleteResult deleteResult) {
+                fail("Expected failure but got success");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                exceptionRef.set(e);
+                latch.countDown();
+            }
+        });
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+        assertNotNull(exceptionRef.get());
+        assertEquals(IOException.class, exceptionRef.get().getClass());
+        assertEquals("Failed to list objects for deletion", exceptionRef.get().getMessage());
     }
 
-    public void testDeleteSdkExceptionDuringDeleteOperation() {
+    public void testDeleteSdkExceptionDuringDeleteOperation() throws Exception {
         final String bucketName = randomAlphaOfLengthBetween(1, 10);
-
         final BlobPath blobPath = new BlobPath();
+        int bulkDeleteSize = 5;
 
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize);
         when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
 
-        final S3Client client = mock(S3Client.class);
-        doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference();
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
 
-        ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class);
-        final int totalPageCount = 3;
-        final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5);
-        final int s3ObjectsPerPage = 5;
-        MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator(
-            totalPageCount,
-            s3ObjectsPerPage,
-            s3ObjectSize
-        );
-        when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator);
-        when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable);
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        doAnswer(invocation -> {
+            Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0);
+            subscriber.onSubscribe(new Subscription() {
+                @Override
+                public void request(long n) {
+                    subscriber.onNext(
+                        ListObjectsV2Response.builder().contents(S3Object.builder().key("test-key").size(100L).build()).build()
+                    );
+                    subscriber.onComplete();
+                }
+
+                @Override
+                public void cancel() {}
+            });
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<ListObjectsV2Response>>any());
 
-        when(client.deleteObjects(any(DeleteObjectsRequest.class))).thenThrow(SdkException.builder().build());
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        CompletableFuture<DeleteObjectsResponse> failedFuture = new CompletableFuture<>();
+        failedFuture.completeExceptionally(new RuntimeException("Simulated delete error"));
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture);
 
         final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
 
-        assertThrows(IOException.class, blobContainer::delete);
+        CountDownLatch latch = new CountDownLatch(1);
+        AtomicReference<Exception> exceptionRef = new AtomicReference<>();
+
+        blobContainer.deleteAsync(new ActionListener<>() {
+            @Override
+            public void onResponse(DeleteResult deleteResult) {
+                fail("Expected failure but got success");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                exceptionRef.set(e);
+                latch.countDown();
+            }
+        });
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+        assertNotNull(exceptionRef.get());
+        logger.error("", exceptionRef.get());
+        assertTrue(exceptionRef.get() instanceof CompletionException);
+        assertEquals("java.lang.RuntimeException: Simulated delete error", exceptionRef.get().getMessage());
     }
 
     public void testExecuteSingleUpload() throws IOException {
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java
index 9ac1564c807c3..c0ee9cb6d980f 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java
@@ -8,6 +8,7 @@
 
 package org.opensearch.repositories.s3;
 
+import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.SizeUnit;
 import org.opensearch.common.unit.SizeValue;
@@ -25,6 +26,8 @@
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class S3RepositoryPluginTests extends OpenSearchTestCase {
 
@@ -37,8 +40,6 @@ public void testGetExecutorBuilders() throws IOException {
         ThreadPool threadPool = null;
         try (S3RepositoryPlugin plugin = new S3RepositoryPlugin(settings, configPath)) {
             List<ExecutorBuilder<?>> executorBuilders = plugin.getExecutorBuilders(settings);
-            assertNotNull(executorBuilders);
-            assertFalse(executorBuilders.isEmpty());
             threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder<?>[0]));
             final Executor executor = threadPool.executor(URGENT_FUTURE_COMPLETION);
             assertNotNull(executor);
@@ -57,6 +58,12 @@ public void testGetExecutorBuilders() throws IOException {
             assertThat(info.getMax(), equalTo(size));
             assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(size));
 
+            ClusterService clusterService = mock(ClusterService.class);
+            when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
+            plugin.createComponents(null, clusterService, threadPool, null, null, null, null, null, null, null, null);
+            assertNotNull(executorBuilders);
+            assertFalse(executorBuilders.isEmpty());
+
             final int availableProcessors = Runtime.getRuntime().availableProcessors();
             if (processors > availableProcessors) {
                 assertWarnings(
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index c27efa080ac4e..f554e6d1dc591 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -789,7 +789,6 @@ public void apply(Settings value, Settings current, Settings previous) {
 
                 // Snapshot related Settings
                 BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING,
-                BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING,
                 BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD,
 
                 SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING,
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index 998ae5e4791b7..9146cb3c4091c 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -50,7 +50,6 @@
 import org.opensearch.action.ActionRunnable;
 import org.opensearch.action.StepListener;
 import org.opensearch.action.support.GroupedActionListener;
-import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
 import org.opensearch.cluster.RepositoryCleanupInProgress;
@@ -70,7 +69,6 @@
 import org.opensearch.common.Randomness;
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.UUIDs;
-import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer;
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
@@ -428,16 +426,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo
         Setting.Property.Final
     );
 
-    /**
-     * Controls the fixed prefix for the snapshot shard blob path. cluster.snapshot.async-deletion.enable
-     */
-    public static final Setting<Boolean> SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING = Setting.boolSetting(
-        "cluster.snapshot.async-deletion.enable",
-        true,
-        Setting.Property.NodeScope,
-        Setting.Property.Dynamic
-    );
-
     protected volatile boolean supportURLRepo;
 
     private volatile int maxShardBlobDeleteBatch;
@@ -531,8 +519,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo
 
     private final String snapshotShardPathPrefix;
 
-    private volatile boolean enableAsyncDeletion;
-
     protected final long repositoryDataCacheThreshold;
 
     /**
@@ -587,8 +573,6 @@ protected BlobStoreRepository(
         this.recoverySettings = recoverySettings;
         this.remoteStoreSettings = new RemoteStoreSettings(clusterService.getSettings(), clusterService.getClusterSettings());
         this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings());
-        this.enableAsyncDeletion = SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.get(clusterService.getSettings());
-        clusterService.getClusterSettings().addSettingsUpdateConsumer(SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, this::setEnableAsyncDeletion);
         this.repositoryDataCacheThreshold = SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD.get(clusterService.getSettings()).getBytes();
     }
 
@@ -2219,15 +2203,7 @@ private void executeOneStaleIndexDelete(
 
     private DeleteResult deleteContainer(BlobContainer container) throws IOException {
         long startTime = System.nanoTime();
-        DeleteResult deleteResult;
-        if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) {
-            // Use deleteAsync and wait for the result
-            PlainActionFuture<DeleteResult> future = new PlainActionFuture<>();
-            ((AsyncMultiStreamBlobContainer) container).deleteAsync(future);
-            deleteResult = future.actionGet();
-        } else {
-            deleteResult = container.delete();
-        }
+        DeleteResult deleteResult = container.delete();
         logger.debug(new ParameterizedMessage("[{}] Deleted {} in {}ns", metadata.name(), container.path(), startTime - System.nanoTime()));
         return deleteResult;
     }
@@ -2862,13 +2838,7 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna
     private void deleteFromContainer(BlobContainer container, List<String> blobs) throws IOException {
         logger.trace(() -> new ParameterizedMessage("[{}] Deleting {} from [{}]", metadata.name(), blobs, container.path()));
         long startTime = System.nanoTime();
-        if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) {
-            PlainActionFuture<Void> future = new PlainActionFuture<>();
-            ((AsyncMultiStreamBlobContainer) container).deleteBlobsAsyncIgnoringIfNotExists(blobs, future);
-            future.actionGet();
-        } else {
-            container.deleteBlobsIgnoringIfNotExists(blobs);
-        }
+        container.deleteBlobsIgnoringIfNotExists(blobs);
         logger.debug(
             () -> new ParameterizedMessage(
                 "[{}] Deletion {} from [{}] took {}ns",
@@ -4742,8 +4712,4 @@ public String toString() {
             return name;
         }
     }
-
-    public void setEnableAsyncDeletion(boolean enableAsyncDeletion) {
-        this.enableAsyncDeletion = enableAsyncDeletion;
-    }
 }
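
The rewritten S3BlobStoreContainerTests above no longer walk a ListObjectsV2Iterable;
they hand-drive a mocked ListObjectsV2Publisher through a reactive Subscription. A
condensed, self-contained sketch of that mocking pattern follows, using a plain
org.reactivestreams Publisher<String> and String "pages" purely as stand-ins for the
SDK publisher and its ListObjectsV2Response pages.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

public class MockedPublisherSketch {

    @SuppressWarnings("unchecked")
    static Publisher<String> pagedPublisher(List<String> pages) {
        Publisher<String> publisher = mock(Publisher.class);
        AtomicInteger cursor = new AtomicInteger();
        doAnswer(invocation -> {
            Subscriber<? super String> subscriber = invocation.getArgument(0);
            subscriber.onSubscribe(new Subscription() {
                @Override
                public void request(long n) {
                    int page = cursor.getAndIncrement();
                    if (page < pages.size()) {
                        subscriber.onNext(pages.get(page));   // emit one page per request(n)
                    }
                    if (page == pages.size() - 1) {
                        subscriber.onComplete();              // complete after the last page (assumes at least one page, as in the tests)
                    }
                }

                @Override
                public void cancel() {}
            });
            return null;
        }).when(publisher).subscribe(any());
        return publisher;
    }
}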

From 2eadf12c0dff4da120ecae085ccf3324b32c215c Mon Sep 17 00:00:00 2001
From: Shubh Sahu <shubhsahu200103@gmail.com>
Date: Thu, 9 Jan 2025 11:14:21 +0530
Subject: [PATCH 29/61] Fix Shallow copy snapshot failures on closed index
 (#16868)

* Fix shallow v1 snapshot failures on closed index

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

* UT fix

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

* Adding UT

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

* small fix

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

* Addressing comments

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

* Addressing comments

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

* Modifying IT to restore snapshot

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>

---------

Signed-off-by: Shubh Sahu <shubhvs@amazon.com>
Co-authored-by: Shubh Sahu <shubhvs@amazon.com>
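
At a high level, the fix lets a shallow-copy (v1) snapshot of a closed index be built from
the last segment metadata uploaded to the remote store, since a closed index has no engine
from which to acquire a live IndexCommit. A rough sketch of that branch is below; ShardView,
ShallowSnapshotSink and RemoteSegmentMetadataView are hypothetical, trimmed-down stand-ins
(the real RemoteSegmentMetadata exposes per-file metadata objects rather than bare lengths),
and the actual wiring lives in SnapshotShardsService and BlobStoreRepository.

import java.io.IOException;
import java.util.Map;

public class ClosedIndexShallowSnapshotSketch {

    // Hypothetical view of the last uploaded remote segment metadata.
    interface RemoteSegmentMetadataView {
        long getPrimaryTerm();

        long getGeneration();

        Map<String, Long> indexFilesToFileLengthMap();
    }

    // Hypothetical view of IndexShard limited to what this sketch needs.
    interface ShardView {
        boolean belongsToClosedIndex();

        RemoteSegmentMetadataView fetchLastRemoteUploadedSegmentMetadata() throws IOException;
    }

    // Mirrors the spirit of the new Repository#snapshotRemoteStoreIndexShard overload:
    // a null IndexCommit plus an explicit file-to-length map describe the closed shard.
    interface ShallowSnapshotSink {
        void snapshotRemoteStoreIndexShard(long primaryTerm, long commitGeneration, Map<String, Long> indexFilesToFileLengthMap);
    }

    static void snapshotClosedShard(ShardView shard, ShallowSnapshotSink repository) throws IOException {
        if (shard.belongsToClosedIndex()) {
            // No live commit is available on a closed index; fall back to the metadata
            // file most recently uploaded to the remote store.
            RemoteSegmentMetadataView metadata = shard.fetchLastRemoteUploadedSegmentMetadata();
            repository.snapshotRemoteStoreIndexShard(metadata.getPrimaryTerm(), metadata.getGeneration(), metadata.indexFilesToFileLengthMap());
        }
        // Open indices keep acquiring a live IndexCommit, exactly as before (omitted here).
    }
}
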
---
 CHANGELOG.md                                  |  1 +
 .../opensearch/remotestore/RemoteStoreIT.java | 78 +++++++++++++++++
 .../opensearch/index/shard/IndexShard.java    | 16 ++++
 .../opensearch/repositories/Repository.java   | 39 +++++++++
 .../blobstore/BlobStoreRepository.java        | 54 ++++++++++--
 .../snapshots/SnapshotShardsService.java      | 63 +++++++++-----
 .../index/shard/RemoteIndexShardTests.java    | 87 +++++++++++++++++++
 .../SegmentReplicationIndexShardTests.java    | 39 +++++++--
 .../RepositoriesServiceTests.java             |  2 +
 ...enSearchIndexLevelReplicationTestCase.java |  4 +
 10 files changed, 346 insertions(+), 37 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1b49368a20fa8..dbbfd96eefca2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606))
 - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335))
 - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
+- Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868))
 
 ### Security
 
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
index ebb911c739eb3..1c4585e38ee90 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -39,6 +39,9 @@
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.plugins.Plugin;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.snapshots.SnapshotInfo;
+import org.opensearch.snapshots.SnapshotState;
 import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.transport.MockTransportService;
@@ -1078,4 +1081,79 @@ public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws Interrup
         Thread.sleep(10000);
         ensureGreen(INDEX_NAME);
     }
+
+    public void testSuccessfulShallowV1SnapshotPostIndexClose() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        String dataNode = internalCluster().startDataOnlyNodes(1).get(0);
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1));
+        ensureGreen(INDEX_NAME);
+
+        ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+        updateSettingsRequest.persistentSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms"));
+
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+        logger.info("Create repository with shallow snapshot setting enabled");
+        String shallowSnapshotRepoName = "shallow-snapshot-repo-name";
+        Path shallowSnapshotRepoPath = randomRepoPath();
+        Settings.Builder settings = Settings.builder()
+            .put("location", shallowSnapshotRepoPath)
+            .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE);
+        createRepository(shallowSnapshotRepoName, "fs", settings);
+
+        for (int i = 0; i < 10; i++) {
+            indexBulk(INDEX_NAME, 1);
+        }
+        flushAndRefresh(INDEX_NAME);
+
+        logger.info("Verify shallow snapshot created before close");
+        final String snapshot1 = "snapshot1";
+        SnapshotInfo snapshotInfo1 = internalCluster().client()
+            .admin()
+            .cluster()
+            .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot1)
+            .setIndices(INDEX_NAME)
+            .setWaitForCompletion(true)
+            .get()
+            .getSnapshotInfo();
+
+        assertEquals(SnapshotState.SUCCESS, snapshotInfo1.state());
+        assertTrue(snapshotInfo1.successfulShards() > 0);
+        assertEquals(0, snapshotInfo1.failedShards());
+
+        for (int i = 0; i < 10; i++) {
+            indexBulk(INDEX_NAME, 1);
+        }
+
+        // close index
+        client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet();
+        Thread.sleep(1000);
+        logger.info("Verify shallow snapshot created after close");
+        final String snapshot2 = "snapshot2";
+
+        SnapshotInfo snapshotInfo2 = internalCluster().client()
+            .admin()
+            .cluster()
+            .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot2)
+            .setIndices(INDEX_NAME)
+            .setWaitForCompletion(true)
+            .get()
+            .getSnapshotInfo();
+
+        assertEquals(SnapshotState.SUCCESS, snapshotInfo2.state());
+        assertTrue(snapshotInfo2.successfulShards() > 0);
+        assertEquals(0, snapshotInfo2.failedShards());
+
+        // delete the index
+        cluster().wipeIndices(INDEX_NAME);
+        // try restoring the snapshot
+        RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(shallowSnapshotRepoName, snapshot2)
+            .setWaitForCompletion(true)
+            .execute()
+            .actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+        ensureGreen(INDEX_NAME);
+        flushAndRefresh(INDEX_NAME);
+        assertBusy(() -> { assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), 20); });
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index eb3999718ca5b..f5de4dfb5a933 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -1624,6 +1624,22 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() {
         return luceneVersion == null ? indexSettings.getIndexVersionCreated().luceneVersion : luceneVersion;
     }
 
+    /**
+     * Fetches the metadata of the most recent segment upload to the remote store.
+     * @return the last uploaded {@link RemoteSegmentMetadata}
+     * @throws IOException if the latest remote metadata file cannot be read
+     */
+    public RemoteSegmentMetadata fetchLastRemoteUploadedSegmentMetadata() throws IOException {
+        if (!indexSettings.isAssignedOnRemoteNode()) {
+            throw new IllegalStateException("Index is not assigned on Remote Node");
+        }
+        RemoteSegmentMetadata lastUploadedMetadata = getRemoteDirectory().readLatestMetadataFile();
+        if (lastUploadedMetadata == null) {
+            throw new FileNotFoundException("No metadata file found in remote store");
+        }
+        return lastUploadedMetadata;
+    }
+
     /**
      * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this
      * commit won't be freed until the commit / snapshot is closed.
diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java
index 138bc13140aea..259c4a6e09ce7 100644
--- a/server/src/main/java/org/opensearch/repositories/Repository.java
+++ b/server/src/main/java/org/opensearch/repositories/Repository.java
@@ -416,6 +416,45 @@ default void snapshotRemoteStoreIndexShard(
         throw new UnsupportedOperationException();
     }
 
+    /**
+     * Adds a reference of remote store data for an index commit point.
+     * <p>
+     * The index commit point can be obtained by using the {@link org.opensearch.index.engine.Engine#acquireLastIndexCommit} method.
+     * For a closed index, it can be obtained by reading the last remote uploaded metadata using the {@link org.opensearch.index.shard.IndexShard#fetchLastRemoteUploadedSegmentMetadata()} method.
+     * Repository implementations shouldn't release the snapshot index commit point; it is released by the method caller.
+     * <p>
+     * As the snapshot process progresses, the implementation of this method should update the {@link IndexShardSnapshotStatus} object and check
+     * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted.
+     * @param store                    store to be snapshotted
+     * @param snapshotId               snapshot id
+     * @param indexId                  id for the index being snapshotted
+     * @param snapshotIndexCommit      commit point; may be {@code null} for a closed index
+     * @param shardStateIdentifier     a unique identifier of the state of the shard that is stored with the shard's snapshot and used
+     *                                 to detect if the shard has changed between snapshots. If {@code null} is passed as the identifier
+     *                                 snapshotting will be done by inspecting the physical files referenced by {@code snapshotIndexCommit}
+     * @param snapshotStatus           snapshot status
+     * @param primaryTerm              current primary term
+     * @param commitGeneration         current commit generation
+     * @param startTime                start time of the snapshot commit; this will be used as the start time of the snapshot
+     * @param indexFilesToFileLengthMap map of index file names to file lengths; used when {@code snapshotIndexCommit} is {@code null} (e.g. for a closed index)
+     * @param listener                 listener invoked on completion
+     */
+    default void snapshotRemoteStoreIndexShard(
+        Store store,
+        SnapshotId snapshotId,
+        IndexId indexId,
+        @Nullable IndexCommit snapshotIndexCommit,
+        @Nullable String shardStateIdentifier,
+        IndexShardSnapshotStatus snapshotStatus,
+        long primaryTerm,
+        long commitGeneration,
+        long startTime,
+        @Nullable Map<String, Long> indexFilesToFileLengthMap,
+        ActionListener<String> listener
+    ) {
+        throw new UnsupportedOperationException();
+    }
+
     /**
      * Restores snapshot of the shard.
      * <p>
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index 9146cb3c4091c..93a7dc0cb06af 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -3714,6 +3714,33 @@ private void writeAtomic(BlobContainer container, final String blobName, final B
         }
     }
 
+    @Override
+    public void snapshotRemoteStoreIndexShard(
+        Store store,
+        SnapshotId snapshotId,
+        IndexId indexId,
+        IndexCommit snapshotIndexCommit,
+        @Nullable String shardStateIdentifier,
+        IndexShardSnapshotStatus snapshotStatus,
+        long primaryTerm,
+        long startTime,
+        ActionListener<String> listener
+    ) {
+        snapshotRemoteStoreIndexShard(
+            store,
+            snapshotId,
+            indexId,
+            snapshotIndexCommit,
+            shardStateIdentifier,
+            snapshotStatus,
+            primaryTerm,
+            snapshotIndexCommit.getGeneration(),
+            startTime,
+            null,
+            listener
+        );
+    }
+
     @Override
     public void snapshotRemoteStoreIndexShard(
         Store store,
@@ -3723,13 +3750,16 @@ public void snapshotRemoteStoreIndexShard(
         String shardStateIdentifier,
         IndexShardSnapshotStatus snapshotStatus,
         long primaryTerm,
+        long commitGeneration,
         long startTime,
+        Map<String, Long> indexFilesToFileLengthMap,
         ActionListener<String> listener
     ) {
         if (isReadOnly()) {
             listener.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository"));
             return;
         }
+
         final ShardId shardId = store.shardId();
         try {
             final String generation = snapshotStatus.generation();
@@ -3737,13 +3767,21 @@ public void snapshotRemoteStoreIndexShard(
             final BlobContainer shardContainer = shardContainer(indexId, shardId);
 
             long indexTotalFileSize = 0;
-            // local store is being used here to fetch the files metadata instead of remote store as currently
-            // remote store is mirroring the local store.
-            List<String> fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames());
-            Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit);
-            for (String fileName : fileNames) {
-                indexTotalFileSize += commitSnapshotMetadata.get(fileName).length();
+            List<String> fileNames;
+
+            if (snapshotIndexCommit != null) {
+                // the local store is used here to fetch the file metadata instead of the remote store,
+                // as the remote store currently mirrors the local store.
+                fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames());
+                Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit);
+                for (String fileName : fileNames) {
+                    indexTotalFileSize += commitSnapshotMetadata.get(fileName).length();
+                }
+            } else {
+                fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet());
+                indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum();
             }
+
             int indexTotalNumberOfFiles = fileNames.size();
 
             snapshotStatus.moveToStarted(
@@ -3754,7 +3792,7 @@ public void snapshotRemoteStoreIndexShard(
                 indexTotalFileSize
             );
 
-            final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration());
+            final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(commitGeneration);
 
             // now create and write the commit point
             logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
@@ -3765,7 +3803,7 @@ public void snapshotRemoteStoreIndexShard(
                         snapshotId.getName(),
                         lastSnapshotStatus.getIndexVersion(),
                         primaryTerm,
-                        snapshotIndexCommit.getGeneration(),
+                        commitGeneration,
                         lastSnapshotStatus.getStartTime(),
                         threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(),
                         indexTotalNumberOfFiles,
diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java
index 8da36bbb8d4bd..1e2264593310d 100644
--- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java
@@ -44,6 +44,7 @@
 import org.opensearch.cluster.SnapshotsInProgress.ShardSnapshotStatus;
 import org.opensearch.cluster.SnapshotsInProgress.ShardState;
 import org.opensearch.cluster.SnapshotsInProgress.State;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
@@ -63,6 +64,7 @@
 import org.opensearch.index.shard.IndexShardState;
 import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
 import org.opensearch.index.snapshots.IndexShardSnapshotStatus.Stage;
+import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.repositories.IndexId;
 import org.opensearch.repositories.RepositoriesService;
@@ -74,7 +76,6 @@
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
-import java.nio.file.NoSuchFileException;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
@@ -371,7 +372,9 @@ private void snapshot(
         ActionListener<String> listener
     ) {
         try {
-            final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id());
+            final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+            final IndexShard indexShard = indexService.getShardOrNull(shardId.id());
+            final boolean closedIndex = indexService.getMetadata().getState() == IndexMetadata.State.CLOSE;
             if (indexShard.routingEntry().primary() == false) {
                 throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary");
             }
@@ -398,24 +401,42 @@ private void snapshot(
                 if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) {
                     long startTime = threadPool.relativeTimeInMillis();
                     long primaryTerm = indexShard.getOperationPrimaryTerm();
-                    // we flush first to make sure we get the latest writes snapshotted
-                    wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true);
-                    IndexCommit snapshotIndexCommit = wrappedSnapshot.get();
-                    long commitGeneration = snapshotIndexCommit.getGeneration();
+                    long commitGeneration = 0L;
+                    Map<String, Long> indexFilesToFileLengthMap = null;
+                    IndexCommit snapshotIndexCommit = null;
+
                     try {
+                        if (closedIndex) {
+                            RemoteSegmentMetadata lastRemoteUploadedIndexCommit = indexShard.fetchLastRemoteUploadedSegmentMetadata();
+                            indexFilesToFileLengthMap = lastRemoteUploadedIndexCommit.getMetadata()
+                                .entrySet()
+                                .stream()
+                                .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getLength()));
+                            primaryTerm = lastRemoteUploadedIndexCommit.getPrimaryTerm();
+                            commitGeneration = lastRemoteUploadedIndexCommit.getGeneration();
+                        } else {
+                            wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true);
+                            snapshotIndexCommit = wrappedSnapshot.get();
+                            commitGeneration = snapshotIndexCommit.getGeneration();
+                        }
                         indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration);
-                    } catch (NoSuchFileException e) {
-                        wrappedSnapshot.close();
-                        logger.warn(
-                            "Exception while acquiring lock on primaryTerm = {} and generation = {}",
-                            primaryTerm,
-                            commitGeneration
-                        );
-                        indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true));
-                        wrappedSnapshot = indexShard.acquireLastIndexCommit(false);
-                        snapshotIndexCommit = wrappedSnapshot.get();
-                        commitGeneration = snapshotIndexCommit.getGeneration();
-                        indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration);
+                    } catch (IOException e) {
+                        if (closedIndex) {
+                            logger.warn("Exception while reading latest metadata file from remote store");
+                            listener.onFailure(e);
+                        } else {
+                            wrappedSnapshot.close();
+                            logger.warn(
+                                "Exception while acquiring lock on primaryTerm = {} and generation = {}",
+                                primaryTerm,
+                                commitGeneration
+                            );
+                            indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true));
+                            wrappedSnapshot = indexShard.acquireLastIndexCommit(false);
+                            snapshotIndexCommit = wrappedSnapshot.get();
+                            commitGeneration = snapshotIndexCommit.getGeneration();
+                            indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration);
+                        }
                     }
                     try {
                         repository.snapshotRemoteStoreIndexShard(
@@ -423,11 +444,13 @@ private void snapshot(
                             snapshot.getSnapshotId(),
                             indexId,
                             snapshotIndexCommit,
-                            getShardStateId(indexShard, snapshotIndexCommit),
+                            null,
                             snapshotStatus,
                             primaryTerm,
+                            commitGeneration,
                             startTime,
-                            ActionListener.runBefore(listener, wrappedSnapshot::close)
+                            indexFilesToFileLengthMap,
+                            closedIndex ? listener : ActionListener.runBefore(listener, wrappedSnapshot::close)
                         );
                     } catch (IndexShardSnapshotFailedException e) {
                         logger.error(
diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java
index 57a561bc8f2a3..4d85a3c491af8 100644
--- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java
@@ -12,6 +12,9 @@
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.util.Version;
 import org.opensearch.action.StepListener;
+import org.opensearch.cluster.ClusterChangedEvent;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.SnapshotsInProgress;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.concurrent.GatedCloseable;
 import org.opensearch.common.settings.Settings;
@@ -20,6 +23,7 @@
 import org.opensearch.index.engine.Engine;
 import org.opensearch.index.engine.InternalEngine;
 import org.opensearch.index.engine.NRTReplicationEngineFactory;
+import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.StoreFileMetadata;
 import org.opensearch.indices.replication.CheckpointInfoResponse;
@@ -32,6 +36,11 @@
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
 import org.opensearch.indices.replication.common.ReplicationFailedException;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.snapshots.Snapshot;
+import org.opensearch.snapshots.SnapshotId;
+import org.opensearch.snapshots.SnapshotShardsService;
 import org.opensearch.test.CorruptionUtils;
 import org.opensearch.test.junit.annotations.TestLogging;
 import org.hamcrest.MatcherAssert;
@@ -41,6 +50,7 @@
 import java.nio.channels.FileChannel;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
@@ -55,6 +65,8 @@
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -541,6 +553,81 @@ public void onReplicationFailure(
         }
     }
 
+    public void testShallowCopySnapshotForClosedIndexSuccessful() throws Exception {
+        try (ReplicationGroup shards = createGroup(0, settings)) {
+            final IndexShard primaryShard = shards.getPrimary();
+            shards.startAll();
+            shards.indexDocs(10);
+            shards.refresh("test");
+            shards.flush();
+            shards.assertAllEqual(10);
+
+            RepositoriesService repositoriesService = createRepositoriesService();
+            BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository("random");
+
+            doAnswer(invocation -> {
+                IndexShardSnapshotStatus snapshotStatus = invocation.getArgument(5);
+                long commitGeneration = invocation.getArgument(7);
+                long startTime = invocation.getArgument(8);
+                final Map<String, Long> indexFilesToFileLengthMap = invocation.getArgument(9);
+                ActionListener<String> listener = invocation.getArgument(10);
+                if (indexFilesToFileLengthMap != null) {
+                    List<String> fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet());
+                    long indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum();
+                    int indexTotalNumberOfFiles = fileNames.size();
+                    snapshotStatus.moveToStarted(startTime, 0, indexTotalNumberOfFiles, 0, indexTotalFileSize);
+                    // Not performing actual snapshot, just modifying the state
+                    snapshotStatus.moveToFinalize(commitGeneration);
+                    snapshotStatus.moveToDone(System.currentTimeMillis(), snapshotStatus.generation());
+                    listener.onResponse(snapshotStatus.generation());
+                    return null;
+                }
+                listener.onResponse(snapshotStatus.generation());
+                return null;
+            }).when(repository)
+                .snapshotRemoteStoreIndexShard(any(), any(), any(), any(), any(), any(), anyLong(), anyLong(), anyLong(), any(), any());
+
+            final SnapshotShardsService shardsService = getSnapshotShardsService(
+                primaryShard,
+                shards.getIndexMetadata(),
+                true,
+                repositoriesService
+            );
+            final Snapshot snapshot1 = new Snapshot(
+                randomAlphaOfLength(10),
+                new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5))
+            );
+
+            // Initialize the shallow copy snapshot
+            final ClusterState initState = addSnapshotIndex(
+                clusterService.state(),
+                snapshot1,
+                primaryShard,
+                SnapshotsInProgress.State.INIT,
+                true
+            );
+            shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state()));
+
+            // start the snapshot
+            shardsService.clusterChanged(
+                new ClusterChangedEvent(
+                    "test",
+                    addSnapshotIndex(clusterService.state(), snapshot1, primaryShard, SnapshotsInProgress.State.STARTED, true),
+                    initState
+                )
+            );
+
+            // Check the snapshot got completed successfully
+            assertBusy(() -> {
+                final IndexShardSnapshotStatus.Copy copy = shardsService.currentSnapshotShards(snapshot1)
+                    .get(primaryShard.shardId)
+                    .asCopy();
+                final IndexShardSnapshotStatus.Stage stage = copy.getStage();
+                assertEquals(IndexShardSnapshotStatus.Stage.DONE, stage);
+            });
+        }
+    }
+
     private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) {
         return new RemoteStoreReplicationSource(shard) {
             @Override
diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
index 2311fc582616f..f4f94baabd7b0 100644
--- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
@@ -68,6 +68,7 @@
 import org.opensearch.indices.replication.common.ReplicationState;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.repositories.IndexId;
+import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.snapshots.Snapshot;
 import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.snapshots.SnapshotInfoTests;
@@ -892,10 +893,21 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception {
             replicateSegments(primaryShard, shards.getReplicas());
             shards.assertAllEqual(10);
 
-            final SnapshotShardsService shardsService = getSnapshotShardsService(replicaShard);
+            final SnapshotShardsService shardsService = getSnapshotShardsService(
+                replicaShard,
+                shards.getIndexMetadata(),
+                false,
+                createRepositoriesService()
+            );
             final Snapshot snapshot = new Snapshot(randomAlphaOfLength(10), new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5)));
 
-            final ClusterState initState = addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.INIT);
+            final ClusterState initState = addSnapshotIndex(
+                clusterService.state(),
+                snapshot,
+                replicaShard,
+                SnapshotsInProgress.State.INIT,
+                false
+            );
             shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state()));
 
             CountDownLatch latch = new CountDownLatch(1);
@@ -907,7 +919,7 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception {
                 shardsService.clusterChanged(
                     new ClusterChangedEvent(
                         "test",
-                        addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED),
+                        addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED, false),
                         initState
                     )
                 );
@@ -956,21 +968,30 @@ public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() th
         }
     }
 
-    private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) {
+    protected SnapshotShardsService getSnapshotShardsService(
+        IndexShard indexShard,
+        IndexMetadata indexMetadata,
+        boolean closedIdx,
+        RepositoriesService repositoriesService
+    ) {
         final TransportService transportService = mock(TransportService.class);
         when(transportService.getThreadPool()).thenReturn(threadPool);
         final IndicesService indicesService = mock(IndicesService.class);
         final IndexService indexService = mock(IndexService.class);
         when(indicesService.indexServiceSafe(any())).thenReturn(indexService);
-        when(indexService.getShardOrNull(anyInt())).thenReturn(replicaShard);
-        return new SnapshotShardsService(settings, clusterService, createRepositoriesService(), transportService, indicesService);
+        when(indexService.getShardOrNull(anyInt())).thenReturn(indexShard);
+        when(indexService.getMetadata()).thenReturn(
+            new IndexMetadata.Builder(indexMetadata).state(closedIdx ? IndexMetadata.State.CLOSE : IndexMetadata.State.OPEN).build()
+        );
+        return new SnapshotShardsService(settings, clusterService, repositoriesService, transportService, indicesService);
     }
 
-    private ClusterState addSnapshotIndex(
+    protected ClusterState addSnapshotIndex(
         ClusterState state,
         Snapshot snapshot,
         IndexShard shard,
-        SnapshotsInProgress.State snapshotState
+        SnapshotsInProgress.State snapshotState,
+        boolean shallowCopySnapshot
     ) {
         final Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardsBuilder = new HashMap<>();
         ShardRouting shardRouting = shard.shardRouting;
@@ -991,7 +1012,7 @@ private ClusterState addSnapshotIndex(
             null,
             SnapshotInfoTests.randomUserMetadata(),
             VersionUtils.randomVersion(random()),
-            false
+            shallowCopySnapshot
         );
         return ClusterState.builder(state)
             .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(Collections.singletonList(entry)))
diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 4cd822c7d583b..1ec6d320762f2 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -774,7 +774,9 @@ public void snapshotRemoteStoreIndexShard(
             String shardStateIdentifier,
             IndexShardSnapshotStatus snapshotStatus,
             long primaryTerm,
+            long commitGeneration,
             long startTime,
+            Map<String, Long> indexFilesToFileLengthMap,
             ActionListener<String> listener
         ) {
 
diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java
index a5dc13c334513..062ebd2051f6e 100644
--- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java
@@ -289,6 +289,10 @@ protected EngineConfigFactory getEngineConfigFactory(IndexSettings indexSettings
             return new EngineConfigFactory(indexSettings);
         }
 
+        public IndexMetadata getIndexMetadata() {
+            return indexMetadata;
+        }
+
         public int indexDocs(final int numOfDoc) throws Exception {
             for (int doc = 0; doc < numOfDoc; doc++) {
                 final IndexRequest indexRequest = new IndexRequest(index.getName()).id(Integer.toString(docId.incrementAndGet()))

From 2c7d7749871e5f21b224660877e5a2f1c6838b86 Mon Sep 17 00:00:00 2001
From: Rishikesh <62345295+Rishikesh1159@users.noreply.github.com>
Date: Thu, 9 Jan 2025 10:35:16 -0800
Subject: [PATCH 30/61] Add Response Status Number in http trace logs. (#16978)

Signed-off-by: Rishikesh1159 <rishireddy1159@gmail.com>
---
 server/src/main/java/org/opensearch/http/HttpTracer.java       | 3 ++-
 .../org/opensearch/http/AbstractHttpServerTransportTests.java  | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/opensearch/http/HttpTracer.java b/server/src/main/java/org/opensearch/http/HttpTracer.java
index de1da4a20e294..e31cca21f6a54 100644
--- a/server/src/main/java/org/opensearch/http/HttpTracer.java
+++ b/server/src/main/java/org/opensearch/http/HttpTracer.java
@@ -116,10 +116,11 @@ void traceResponse(
     ) {
         logger.trace(
             new ParameterizedMessage(
-                "[{}][{}][{}][{}][{}] sent response to [{}] success [{}]",
+                "[{}][{}][{}][{}][{}][{}] sent response to [{}] success [{}]",
                 requestId,
                 opaqueHeader,
                 restResponse.status(),
+                restResponse.status().getStatus(),
                 restResponse.contentType(),
                 contentLength,
                 httpChannel,
diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
index a4295289c3109..cd6beffa6e195 100644
--- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
+++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
@@ -285,6 +285,8 @@ public HttpStats stats() {
                             + opaqueId
                             + "\\]\\["
                             + (badRequest ? "BAD_REQUEST" : "OK")
+                            + "\\]\\["
+                            + (badRequest ? "400" : "200")
                             + "\\]\\[null\\]\\[0\\] sent response to \\[.*"
                     )
                 );

From cc990c024fe5305f40daa6a1991cd3d9fa21467c Mon Sep 17 00:00:00 2001
From: kkewwei <kewei.11@bytedance.com>
Date: Fri, 10 Jan 2025 07:43:29 +0800
Subject: [PATCH 31/61] Support searching from doc_value using
 termQueryCaseInsensitive/termQuery in flat_object/keyword fields (#16974)

Signed-off-by: kkewwei <kewei.11@bytedance.com>
Signed-off-by: kkewwei <kkewwei@163.com>
---
 CHANGELOG.md                                  |   1 +
 .../92_flat_object_support_doc_values.yml     |  50 +++-
 .../test/search/340_doc_values_field.yml      |  35 ++-
 .../index/mapper/FlatObjectFieldMapper.java   |  19 +-
 .../index/mapper/KeywordFieldMapper.java      |  41 +++
 .../mapper/FlatObjectFieldTypeTests.java      | 283 ++++++++++++++++--
 .../index/mapper/KeywordFieldTypeTests.java   |  49 ++-
 7 files changed, 436 insertions(+), 42 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dbbfd96eefca2..0cb11d1c45d38 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534))
 - Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/))
 - Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/))
+- Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
index c840276ee1157..266b41c6b5a77 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
@@ -45,6 +45,8 @@ setup:
           {"order":"order7","issue":{"labels":{"number":7,"name":"abc7","status":1}}}
           {"index":{"_index":"flat_object_doc_values_test","_id":"8"}}
           {"order":"order8","issue":{"labels":{"number":8,"name":"abc8","status":1}}}
+          {"index":{"_index":"flat_object_doc_values_test","_id":"9"}}
+          {"order":"order9","issue":{"labels":{"number":9,"name":"abC8","status":1}}}
 
 ---
 # Delete Index when connection is teardown
@@ -68,7 +70,53 @@ teardown:
           }
         }
 
-  - length: { hits.hits: 9 }
+  - length: { hits.hits: 10 }
+
+  # Case Insensitive Term Query with exact dot path.
+  - do:
+      search:
+        body: {
+          _source: true,
+          query: {
+            bool: {
+              must: [
+                {
+                  term: {
+                    issue.labels.name: {
+                      value: "abc8",
+                      case_insensitive: "true"
+                    }
+                  }
+                }
+              ]
+            }
+          }
+        }
+
+  - length: { hits.hits: 2 }
+
+  # Case Insensitive Term Query with no path.
+  - do:
+      search:
+        body: {
+          _source: true,
+          query: {
+            bool: {
+              must: [
+                {
+                  term: {
+                    issue.labels: {
+                      value: "abc8",
+                      case_insensitive: "true"
+                    }
+                  }
+                }
+              ]
+            }
+          }
+        }
+
+  - length: { hits.hits: 2 }
 
   # Term Query with exact dot path.
   - do:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml
index 647aaf2c9088b..53ed730925595 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml
@@ -1121,8 +1121,8 @@
 "search on fields with only doc_values enabled":
   - skip:
       features: [ "headers" ]
-      version: " - 2.18.99"
-      reason: "searching with only doc_values was finally added in 2.19.0"
+      version: " - 2.99.99"
+      reason: "searching with only doc_values was finally added in 3.0.0"
   - do:
       indices.create:
         index: test-doc-values
@@ -1193,6 +1193,37 @@
           - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2", "boolean": true, "date_nanos": "2020-10-29T12:12:12.123456789Z", "date": "2020-10-29T12:12:12.987Z" }'
           - '{ "index": { "_index": "test-doc-values", "_id": "3" } }'
           - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3", "boolean": false, "date_nanos": "2024-10-29T12:12:12.123456789Z", "date": "2024-10-29T12:12:12.987Z" }'
+          - '{ "index": { "_index": "test-doc-values", "_id": "4" } }'
+          - '{ "some_keyword": "Keyword1" }'
+          - '{ "index": { "_index": "test-doc-values", "_id": "5" } }'
+          - '{ "some_keyword": "keyword1" }'
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test-doc-values
+        body:
+          query:
+            term: {
+              "some_keyword": {
+                "value": "Keyword1"
+              } }
+
+  - match: { hits.total: 1 }
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test-doc-values
+        body:
+          query:
+            term: {
+              "some_keyword": {
+                "value": "keyword1",
+                "case_insensitive": "true"
+              } }
+
+  - match: { hits.total: 2 }
 
   - do:
       search:
diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
index 13063a4761006..4fe821ff74d34 100644
--- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
@@ -15,7 +15,6 @@
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.FieldExistsQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
@@ -364,23 +363,17 @@ private KeywordFieldType valueFieldType() {
             return (mappedFieldTypeName == null) ? valueFieldType : valueAndPathFieldType;
         }
 
+        @Override
+        public Query termQueryCaseInsensitive(Object value, QueryShardContext context) {
+            return valueFieldType().termQueryCaseInsensitive(rewriteValue(inputToString(value)), context);
+        }
+
         /**
          * redirect queries with rewrite value to rewriteSearchValue and directSubFieldName
          */
         @Override
         public Query termQuery(Object value, @Nullable QueryShardContext context) {
-
-            String searchValueString = inputToString(value);
-            String directSubFieldName = directSubfield();
-            String rewriteSearchValue = rewriteValue(searchValueString);
-
-            failIfNotIndexed();
-            Query query;
-            query = new TermQuery(new Term(directSubFieldName, indexedValueForSearch(rewriteSearchValue)));
-            if (boost() != 1f) {
-                query = new BoostQuery(query, boost());
-            }
-            return query;
+            return valueFieldType().termQuery(rewriteValue(inputToString(value)), context);
         }
 
         @Override
diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
index 90e43c818e137..4436e74c821c3 100644
--- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java
@@ -39,6 +39,7 @@
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
 import org.apache.lucene.search.MultiTermQuery;
@@ -398,6 +399,46 @@ protected Object rewriteForDocValue(Object value) {
             return value;
         }
 
+        @Override
+        public Query termQueryCaseInsensitive(Object value, QueryShardContext context) {
+            failIfNotIndexedAndNoDocValues();
+            if (isSearchable()) {
+                return super.termQueryCaseInsensitive(value, context);
+            } else {
+                BytesRef bytesRef = indexedValueForSearch(rewriteForDocValue(value));
+                Term term = new Term(name(), bytesRef);
+                Query query = AutomatonQueries.createAutomatonQuery(
+                    term,
+                    AutomatonQueries.toCaseInsensitiveString(bytesRef.utf8ToString(), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT),
+                    MultiTermQuery.DOC_VALUES_REWRITE
+                );
+                if (boost() != 1f) {
+                    query = new BoostQuery(query, boost());
+                }
+                return query;
+            }
+        }
+
+        @Override
+        public Query termQuery(Object value, QueryShardContext context) {
+            failIfNotIndexedAndNoDocValues();
+            if (isSearchable()) {
+                return super.termQuery(value, context);
+            } else {
+                Query query = SortedSetDocValuesField.newSlowRangeQuery(
+                    name(),
+                    indexedValueForSearch(rewriteForDocValue(value)),
+                    indexedValueForSearch(rewriteForDocValue(value)),
+                    true,
+                    true
+                );
+                if (boost() != 1f) {
+                    query = new BoostQuery(query, boost());
+                }
+                return query;
+            }
+        }
+
         @Override
         public Query termsQuery(List<?> values, QueryShardContext context) {
             failIfNotIndexedAndNoDocValues();
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java
index 38a6f13777f00..4160108342534 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java
@@ -9,6 +9,7 @@
 package org.opensearch.index.mapper;
 
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.FieldExistsQuery;
@@ -24,6 +25,7 @@
 import org.apache.lucene.search.WildcardQuery;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.Operations;
+import org.opensearch.common.lucene.search.AutomatonQueries;
 import org.opensearch.common.unit.Fuzziness;
 import org.opensearch.index.analysis.AnalyzerScope;
 import org.opensearch.index.analysis.NamedAnalyzer;
@@ -138,39 +140,273 @@ public void testRewriteValue() {
         assertEquals("field.bar=foo", searchValuesDocPath);
     }
 
-    public void testTermQuery() {
+    public void testTermQueryCaseInsensitive() {
 
-        FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+        // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType =
+                (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true);
+
+            MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType(
+                "field.bar",
+                flatParentFieldType.name(),
+                flatParentFieldType.getValueFieldType(),
+                flatParentFieldType.getValueAndPathFieldType()
+            );
+            assertEquals(
+                AutomatonQueries.caseInsensitiveTermQuery(new Term("field._valueAndPath", "field.bar=fOo")),
+                dynamicMappedFieldType.termQueryCaseInsensitive("fOo", null)
+            );
+        }
+
+        // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                null,
+                true,
+                false
+            );
+            assertEquals(
+                AutomatonQueries.caseInsensitiveTermQuery(new Term("field._value", "fOo")),
+                ft.termQueryCaseInsensitive("fOo", null)
+            );
+        }
+
+        // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                "field",
+                true,
+                false
+            );
+            Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("fOo")));
+
+            assertEquals(expected, ft.termQuery("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+        }
+
+        // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                null,
+                false,
+                true
+            );
+            Query expected = AutomatonQueries.createAutomatonQuery(
+                new Term("field" + VALUE_SUFFIX, "field.fOo"),
+                AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE),
+                MultiTermQuery.DOC_VALUES_REWRITE
+            );
+            assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+        }
+
+        // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                "field",
+                false,
+                true
+            );
+            Query expected = AutomatonQueries.createAutomatonQuery(
+                new Term("field" + VALUE_AND_PATH_SUFFIX, "field.fOo"),
+                AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE),
+                MultiTermQuery.DOC_VALUES_REWRITE
+            );
+
+            assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+        }
+
+        // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                null,
+                false,
+                false
+            );
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
+            );
+            assertEquals(
+                "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.",
+                e.getMessage()
+            );
+        }
+
+        // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                "field",
+                false,
+                false
+            );
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
+            );
+            assertEquals(
+                "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.",
+                e.getMessage()
+            );
+        }
+
+        MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType(
             "field",
             null,
-            true,
-            true
+            false,
+            false,
+            null,
+            Collections.emptyMap()
+        );
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
         );
+        assertEquals(
+            "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.",
+            e.getMessage()
+        );
+    }
 
-        // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field
-        String searchFieldName = (flatParentFieldType).directSubfield();
-        String searchValues = (flatParentFieldType).rewriteValue("foo");
-        assertEquals("foo", searchValues);
-        assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null));
+    public void testTermQuery() {
 
-        MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType(
-            "field.bar",
-            flatParentFieldType.name(),
-            flatParentFieldType.getValueFieldType(),
-            flatParentFieldType.getValueAndPathFieldType()
-        );
+        // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType =
+                (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true);
 
-        // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field
-        String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield();
-        String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo");
-        assertEquals("field.bar=foo", searchValuesDocPath);
-        assertEquals(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), dynamicMappedFieldType.termQuery("foo", null));
+            // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field
+            String searchFieldName = (flatParentFieldType).directSubfield();
+            String searchValues = (flatParentFieldType).rewriteValue("foo");
+            assertEquals("foo", searchValues);
+            assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null));
+
+            MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType(
+                "field.bar",
+                flatParentFieldType.name(),
+                flatParentFieldType.getValueFieldType(),
+                flatParentFieldType.getValueAndPathFieldType()
+            );
+
+            // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field
+            String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield();
+            String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo");
+            assertEquals("field.bar=foo", searchValuesDocPath);
+            assertEquals(
+                new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)),
+                dynamicMappedFieldType.termQuery("foo", null)
+            );
+
+        }
+
+        // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                null,
+                true,
+                false
+            );
+            Query expected = new TermQuery(new Term("field" + VALUE_SUFFIX, new BytesRef("foo")));
+            assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+        }
+
+        // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                "field",
+                true,
+                false
+            );
+            Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("foo")));
+
+            assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+        }
+
+        // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                null,
+                false,
+                true
+            );
+            Query expected = SortedSetDocValuesField.newSlowRangeQuery(
+                "field" + VALUE_SUFFIX,
+                new BytesRef("field.foo"),
+                new BytesRef("field.foo"),
+                true,
+                true
+            );
+            assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+
+        }
+
+        // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                "field",
+                false,
+                true
+            );
+            Query expected = SortedSetDocValuesField.newSlowRangeQuery(
+                "field" + VALUE_AND_PATH_SUFFIX,
+                new BytesRef("field.foo"),
+                new BytesRef("field.foo"),
+                true,
+                true
+            );
+            assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+        }
+
+        // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                null,
+                false,
+                false
+            );
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
+            );
+            assertEquals(
+                "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.",
+                e.getMessage()
+            );
+        }
+
+        // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null
+        {
+            FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType(
+                "field",
+                "field",
+                false,
+                false
+            );
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
+            );
+            assertEquals(
+                "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.",
+                e.getMessage()
+            );
+        }
 
         MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType(
             "field",
             null,
             false,
-            true,
+            false,
             null,
             Collections.emptyMap()
         );
@@ -178,7 +414,10 @@ public void testTermQuery() {
             IllegalArgumentException.class,
             () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
         );
-        assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
+        assertEquals(
+            "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.",
+            e.getMessage()
+        );
     }
 
     public void testExistsQuery() {
diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java
index f291b864beb59..d52426c67d256 100644
--- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java
@@ -41,6 +41,7 @@
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.FuzzyQuery;
@@ -60,6 +61,7 @@
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.lucene.BytesRefs;
 import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.lucene.search.AutomatonQueries;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.Fuzziness;
 import org.opensearch.index.analysis.AnalyzerScope;
@@ -100,13 +102,52 @@ public void testIsFieldWithinQuery() throws IOException {
         );
     }
 
+    public void testTermQueryCaseInsensitive() {
+        MappedFieldType ft = new KeywordFieldType("field");
+        Query expected = AutomatonQueries.caseInsensitiveTermQuery(new Term("field", BytesRefs.toBytesRef("foo")));
+        assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+
+        ft = new KeywordFieldType("field", true, false, Collections.emptyMap());
+        assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+
+        ft = new KeywordFieldType("field", false, true, Collections.emptyMap());
+        Term term = new Term("field", "foo");
+
+        expected = AutomatonQueries.createAutomatonQuery(
+            term,
+            AutomatonQueries.toCaseInsensitiveString("foo", Integer.MAX_VALUE),
+            MultiTermQuery.DOC_VALUES_REWRITE
+        );
+        assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+
+        MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap());
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQueryCaseInsensitive("foo", null));
+        assertEquals(
+            "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.",
+            e.getMessage()
+        );
+    }
+
     public void testTermQuery() {
         MappedFieldType ft = new KeywordFieldType("field");
-        assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", null));
+        assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+
+        ft = new KeywordFieldType("field", true, false, Collections.emptyMap());
+        assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
 
-        MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap());
-        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null));
-        assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
+        ft = new KeywordFieldType("field", false, true, Collections.emptyMap());
+        Query expected = SortedSetDocValuesField.newSlowRangeQuery("field", new BytesRef("foo"), new BytesRef("foo"), true, true);
+        assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES));
+
+        MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap());
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> unsearchable.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)
+        );
+        assertEquals(
+            "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.",
+            e.getMessage()
+        );
     }
 
     public void testTermQueryWithNormalizer() {

From bbcbd216200c2d71e32f4fe062c6e040d907659d Mon Sep 17 00:00:00 2001
From: panguixin <panguixin@bytedance.com>
Date: Fri, 10 Jan 2025 09:53:11 +0800
Subject: [PATCH 32/61] use the correct type to widen the sort fields when
 merging top docs (#16881)

* use the correct type to widen the sort fields when merging top docs

Signed-off-by: panguixin <panguixin@bytedance.com>

* fix

Signed-off-by: panguixin <panguixin@bytedance.com>

* apply comments

Signed-off-by: panguixin <panguixin@bytedance.com>

* changelog

Signed-off-by: panguixin <panguixin@bytedance.com>

* add more tests

Signed-off-by: panguixin <panguixin@bytedance.com>

---------

Signed-off-by: panguixin <panguixin@bytedance.com>
---
 CHANGELOG.md                                  |  1 +
 .../opensearch/search/sort/FieldSortIT.java   | 99 +++++++++++++++++++
 .../action/search/SearchPhaseController.java  | 54 ++++++----
 .../sort/SortedWiderNumericSortField.java     | 21 +++-
 4 files changed, 152 insertions(+), 23 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0cb11d1c45d38..9aabbbf75f00c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
 - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707))
 - Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909))
+- Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881))
 
 ### Deprecated
 - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712))
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java
index fdb12639c65be..cc837019d0b42 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java
@@ -49,6 +49,7 @@
 import org.opensearch.common.Numbers;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -63,6 +64,7 @@
 import org.opensearch.search.SearchHit;
 import org.opensearch.search.SearchHits;
 import org.opensearch.test.InternalSettingsPlugin;
+import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
@@ -82,7 +84,9 @@
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
+import java.util.function.Supplier;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.functionScoreQuery;
@@ -2609,4 +2613,99 @@ public void testSimpleSortsPoints() throws Exception {
 
         assertThat(searchResponse.toString(), not(containsString("error")));
     }
+
+    public void testSortMixedIntegerNumericFields() throws Exception {
+        internalCluster().ensureAtLeastNumDataNodes(3);
+        AtomicInteger counter = new AtomicInteger();
+        index("long", () -> Long.MAX_VALUE - counter.getAndIncrement());
+        index("integer", () -> Integer.MAX_VALUE - counter.getAndIncrement());
+        SearchResponse searchResponse = client().prepareSearch("long", "integer")
+            .setQuery(matchAllQuery())
+            .setSize(10)
+            .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC).sortMode(SortMode.MAX))
+            .get();
+        assertNoFailures(searchResponse);
+        long[] sortValues = new long[10];
+        for (int i = 0; i < 10; i++) {
+            sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue();
+        }
+        for (int i = 1; i < 10; i++) {
+            assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThan(sortValues[i]));
+        }
+    }
+
+    public void testSortMixedFloatingNumericFields() throws Exception {
+        internalCluster().ensureAtLeastNumDataNodes(3);
+        AtomicInteger counter = new AtomicInteger();
+        index("double", () -> 100.5 - counter.getAndIncrement());
+        counter.set(0);
+        index("float", () -> 200.5 - counter.getAndIncrement());
+        counter.set(0);
+        index("half_float", () -> 300.5 - counter.getAndIncrement());
+        SearchResponse searchResponse = client().prepareSearch("double", "float", "half_float")
+            .setQuery(matchAllQuery())
+            .setSize(15)
+            .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC).sortMode(SortMode.MAX))
+            .get();
+        assertNoFailures(searchResponse);
+        double[] sortValues = new double[15];
+        for (int i = 0; i < 15; i++) {
+            sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue();
+        }
+        for (int i = 1; i < 15; i++) {
+            assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThan(sortValues[i]));
+        }
+    }
+
+    public void testSortMixedFloatingAndIntegerNumericFields() throws Exception {
+        internalCluster().ensureAtLeastNumDataNodes(3);
+        index("long", () -> randomLongBetween(0, (long) 2E53 - 1));
+        index("integer", OpenSearchTestCase::randomInt);
+        index("double", OpenSearchTestCase::randomDouble);
+        index("float", () -> randomFloat());
+        boolean asc = randomBoolean();
+        SearchResponse searchResponse = client().prepareSearch("long", "integer", "double", "float")
+            .setQuery(matchAllQuery())
+            .setSize(20)
+            .addSort(SortBuilders.fieldSort("field").order(asc ? SortOrder.ASC : SortOrder.DESC).sortMode(SortMode.MAX))
+            .get();
+        assertNoFailures(searchResponse);
+        double[] sortValues = new double[20];
+        for (int i = 0; i < 20; i++) {
+            sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue();
+        }
+        if (asc) {
+            for (int i = 1; i < 20; i++) {
+                assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThanOrEqualTo(sortValues[i]));
+            }
+        } else {
+            for (int i = 1; i < 20; i++) {
+                assertThat(Arrays.toString(sortValues), sortValues[i - 1], greaterThanOrEqualTo(sortValues[i]));
+            }
+        }
+    }
+
+    private void index(String type, Supplier<Number> valueSupplier) throws Exception {
+        assertAcked(
+            prepareCreate(type).setMapping(
+                XContentFactory.jsonBuilder()
+                    .startObject()
+                    .startObject("properties")
+                    .startObject("field")
+                    .field("type", type)
+                    .endObject()
+                    .endObject()
+                    .endObject()
+            ).setSettings(Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 0))
+        );
+        ensureGreen(type);
+        for (int i = 0; i < 5; i++) {
+            client().prepareIndex(type)
+                .setId(Integer.toString(i))
+                .setSource("{\"field\" : " + valueSupplier.get() + " }", XContentType.JSON)
+                .get();
+        }
+        client().admin().indices().prepareRefresh(type).get();
+    }
+
 }
diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java
index 161a103cdf36a..d63695447e365 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java
@@ -48,6 +48,7 @@
 import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
 import org.opensearch.core.common.breaker.CircuitBreaker;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.index.fielddata.IndexFieldData;
 import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.SearchHit;
 import org.opensearch.search.SearchHits;
@@ -604,36 +605,51 @@ private static void validateMergeSortValueFormats(Collection<? extends SearchPha
      * support sort optimization, we removed type widening there and taking care here during merging.
      * More details here https://github.com/opensearch-project/OpenSearch/issues/6326
      */
+    // TODO: should we check the compatibility between types
     private static Sort createSort(TopFieldDocs[] topFieldDocs) {
         final SortField[] firstTopDocFields = topFieldDocs[0].fields;
         final SortField[] newFields = new SortField[firstTopDocFields.length];
+        for (int fieldIndex = 0; fieldIndex < firstTopDocFields.length; fieldIndex++) {
+            SortField.Type firstType = getSortType(firstTopDocFields[fieldIndex]);
+            newFields[fieldIndex] = firstTopDocFields[fieldIndex];
+            if (SortedWiderNumericSortField.isTypeSupported(firstType) == false) {
+                continue;
+            }
 
-        for (int i = 0; i < firstTopDocFields.length; i++) {
-            final SortField delegate = firstTopDocFields[i];
-            final SortField.Type type = delegate instanceof SortedNumericSortField
-                ? ((SortedNumericSortField) delegate).getNumericType()
-                : delegate.getType();
+            boolean requireWiden = false;
+            boolean isFloat = firstType == SortField.Type.FLOAT || firstType == SortField.Type.DOUBLE;
+            for (int shardIndex = 1; shardIndex < topFieldDocs.length; shardIndex++) {
+                final SortField sortField = topFieldDocs[shardIndex].fields[fieldIndex];
+                SortField.Type sortType = getSortType(sortField);
+                if (SortedWiderNumericSortField.isTypeSupported(sortType) == false) {
+                    // throw exception if sortType is not CUSTOM?
+                    // skip this shard or do not widen?
+                    requireWiden = false;
+                    break;
+                }
+                requireWiden = requireWiden || sortType != firstType;
+                isFloat = isFloat || sortType == SortField.Type.FLOAT || sortType == SortField.Type.DOUBLE;
+            }
 
-            if (SortedWiderNumericSortField.isTypeSupported(type) && isSortWideningRequired(topFieldDocs, i)) {
-                newFields[i] = new SortedWiderNumericSortField(delegate.getField(), type, delegate.getReverse());
-            } else {
-                newFields[i] = firstTopDocFields[i];
+            if (requireWiden) {
+                newFields[fieldIndex] = new SortedWiderNumericSortField(
+                    firstTopDocFields[fieldIndex].getField(),
+                    isFloat ? SortField.Type.DOUBLE : SortField.Type.LONG,
+                    firstTopDocFields[fieldIndex].getReverse()
+                );
             }
         }
         return new Sort(newFields);
     }
 
-    /**
-     * It will compare respective SortField between shards to see if any shard results have different
-     * field mapping type, accordingly it will decide to widen the sort fields.
-     */
-    private static boolean isSortWideningRequired(TopFieldDocs[] topFieldDocs, int sortFieldindex) {
-        for (int i = 0; i < topFieldDocs.length - 1; i++) {
-            if (!topFieldDocs[i].fields[sortFieldindex].equals(topFieldDocs[i + 1].fields[sortFieldindex])) {
-                return true;
-            }
+    private static SortField.Type getSortType(SortField sortField) {
+        if (sortField.getComparatorSource() instanceof IndexFieldData.XFieldComparatorSource) {
+            return ((IndexFieldData.XFieldComparatorSource) sortField.getComparatorSource()).reducedType();
+        } else {
+            return sortField instanceof SortedNumericSortField
+                ? ((SortedNumericSortField) sortField).getNumericType()
+                : sortField.getType();
         }
-        return false;
     }
 
     /*
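For orientation, the per-field decision implemented above can be compressed into a small standalone sketch (this is not the actual private createSort method; the helper name is illustrative, and it assumes every shard type is one supported by SortedWiderNumericSortField): keep the first shard's SortField when all shards report the same numeric type, otherwise widen to DOUBLE if any shard sorts on a floating-point type and to LONG otherwise.

    // Simplified sketch of the widening rule; SortedWiderNumericSortField is the class patched below.
    static SortField widenedSortField(String field, boolean reverse, SortField.Type... shardTypes) {
        SortField.Type first = shardTypes[0];
        boolean requireWiden = false;
        boolean isFloat = first == SortField.Type.FLOAT || first == SortField.Type.DOUBLE;
        for (int i = 1; i < shardTypes.length; i++) {
            requireWiden |= shardTypes[i] != first;
            isFloat |= shardTypes[i] == SortField.Type.FLOAT || shardTypes[i] == SortField.Type.DOUBLE;
        }
        return requireWiden
            ? new SortedWiderNumericSortField(field, isFloat ? SortField.Type.DOUBLE : SortField.Type.LONG, reverse)
            : new SortField(field, first, reverse);
    }

For example, merging a LONG shard with a FLOAT shard widens the field to DOUBLE, while LONG plus INT widens to LONG.
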
diff --git a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java
index 10cc832fdb684..7f61b7cca3501 100644
--- a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java
+++ b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java
@@ -21,14 +21,18 @@
 import org.apache.lucene.search.comparators.NumericComparator;
 
 import java.io.IOException;
+import java.util.Comparator;
 
 /**
- * Sorted numeric field for wider sort types,
- * to help sorting two different numeric types.
+ * Sorted numeric field for wider sort types, to help sorting two different numeric types.
+ * NOTE: unsigned_long is not supported by the widening sort because unsigned_long values cannot be mixed with the other numeric types
  *
  * @opensearch.internal
  */
 public class SortedWiderNumericSortField extends SortedNumericSortField {
+    private final int byteCounts;
+    private final Comparator<Number> comparator;
+
     /**
      * Creates a sort, possibly in reverse, specifying how the sort value from the document's set is
      * selected.
@@ -39,6 +43,15 @@ public class SortedWiderNumericSortField extends SortedNumericSortField {
      */
     public SortedWiderNumericSortField(String field, Type type, boolean reverse) {
         super(field, type, reverse);
+        if (type == Type.LONG) {
+            byteCounts = Long.BYTES;
+            comparator = Comparator.comparingLong(Number::longValue);
+        } else if (type == Type.DOUBLE) {
+            byteCounts = Double.BYTES;
+            comparator = Comparator.comparingDouble(Number::doubleValue);
+        } else {
+            throw new IllegalArgumentException("Unsupported numeric type: " + type);
+        }
     }
 
     /**
@@ -51,7 +64,7 @@ public SortedWiderNumericSortField(String field, Type type, boolean reverse) {
      */
     @Override
     public FieldComparator<?> getComparator(int numHits, Pruning pruning) {
-        return new NumericComparator<Number>(getField(), (Number) getMissingValue(), getReverse(), pruning, Double.BYTES) {
+        return new NumericComparator<Number>(getField(), (Number) getMissingValue(), getReverse(), pruning, byteCounts) {
             @Override
             public int compare(int slot1, int slot2) {
                 throw new UnsupportedOperationException();
@@ -78,7 +91,7 @@ public int compareValues(Number first, Number second) {
                 } else if (second == null) {
                     return 1;
                 } else {
-                    return Double.compare(first.doubleValue(), second.doubleValue());
+                    return comparator.compare(first, second);
                 }
             }
         };
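One reason the comparator above switches on the widened type instead of always comparing through double (as the removed Double.compare call did) is that long values beyond 2^53 lose precision when converted to double. A minimal, hypothetical check, not part of the patch:

    long a = 9223372036854775807L;            // Long.MAX_VALUE
    long b = 9223372036854775806L;            // Long.MAX_VALUE - 1
    System.out.println(Double.compare(a, b)); // 0 -- both widen to 9.223372036854776E18
    System.out.println(Long.compare(a, b));   // 1 -- the LONG comparator keeps them ordered

byteCounts follows the same switch, so the NumericComparator is constructed with the matching value width (Long.BYTES or Double.BYTES).
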

From f6dc4a691d4bdf9b2d85e84c14a9c5c2e61e2460 Mon Sep 17 00:00:00 2001
From: panguixin <panguixin@bytedance.com>
Date: Fri, 10 Jan 2025 20:43:56 +0800
Subject: [PATCH 33/61] Fix multi-value sort for unsigned long (#16732)

* Fix multi-value sort for unsigned long

Signed-off-by: panguixin <panguixin@bytedance.com>

* Add initial rest-api-spec tests

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* add more rest tests

Signed-off-by: panguixin <panguixin@bytedance.com>

* fix

Signed-off-by: panguixin <panguixin@bytedance.com>

* fix

Signed-off-by: panguixin <panguixin@bytedance.com>

* Extend MultiValueMode with dedicated support of unsigned_long doc values

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Add CHANGELOG.md, minor cleanups

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Correct the license headers

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Correct the @PublicApi version

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Replace SingletonSortedNumericUnsignedLongValues with LongToSortedNumericUnsignedLongValues (as per review comments)

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

---------

Signed-off-by: panguixin <panguixin@bytedance.com>
Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
Co-authored-by: Andriy Redko <andriy.redko@aiven.io>
---
 CHANGELOG.md                                  |   1 +
 .../test/search/260_sort_double.yml           | 136 +++++++
 .../test/search/260_sort_long.yml             | 137 +++++++
 .../test/search/260_sort_unsigned_long.yml    | 167 +++++++++
 ...LongToSortedNumericUnsignedLongValues.java |  55 +++
 .../SortedNumericUnsignedLongValues.java      |  62 ++++
 .../UnsignedLongValuesComparatorSource.java   |  10 +-
 .../org/opensearch/search/MultiValueMode.java | 333 ++++++++++++++++++
 .../search/MultiValueModeTests.java           | 230 ++++++++++++
 9 files changed, 1126 insertions(+), 5 deletions(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml
 create mode 100644 server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java
 create mode 100644 server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9aabbbf75f00c..512ba48941c87 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -91,6 +91,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335))
 - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
 - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868))
+- Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732))
 
 ### Security
 
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml
new file mode 100644
index 0000000000000..eccafaf96dd23
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml
@@ -0,0 +1,136 @@
+setup:
+  - do:
+      indices.create:
+          index: double_sort
+          body:
+            settings:
+              number_of_shards: 3
+              number_of_replicas: 0
+            mappings:
+              properties:
+                field:
+                  type: double
+
+---
+"test sorting against double only fields":
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{ "index" : { "_index" : "double_sort", "_id" : "1" } }'
+          - '{"field" : [ 900719925474099.1, 1.1 ] }'
+          - '{ "index" : { "_index" : "double_sort", "_id" : "2" } }'
+          - '{"field" : [ 900719925474099.2, 900719925474099.3 ] }'
+          - '{ "index" : { "_index" : "double_sort", "_id" : "3" } }'
+          - '{"field" : [ 450359962737049.4, 3.5, 4.6 ] }'
+          - '{ "index" : { "_index" : "double_sort", "_id" : "4" } }'
+          - '{"field" : [ 450359962737049.7, 5.8, -1.9, -2.0 ] }'
+
+  - do:
+      search:
+        index: double_sort
+        body:
+          size: 5
+          sort: [{ field: { mode: max, order: desc } } ]
+  - match: {hits.total.value: 4 }
+  - length: {hits.hits: 4 }
+  - match: { hits.hits.0._index: double_sort }
+  - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] }
+  - match: { hits.hits.0.sort.0: 900719925474099.2 }
+  - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] }
+  - match: { hits.hits.1.sort.0: 900719925474099.1 }
+  - match: { hits.hits.2._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] }
+  - match: { hits.hits.2.sort.0: 450359962737049.7 }
+  - match: { hits.hits.3._source.field: [ 450359962737049.4, 3.5, 4.6 ] }
+  - match: { hits.hits.3.sort.0: 450359962737049.4 }
+
+  - do:
+      search:
+        index: double_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: max, order: asc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: double_sort }
+  - match: { hits.hits.0._source.field: [ 450359962737049.4, 3.5, 4.6 ] }
+  - match: { hits.hits.0.sort.0: 450359962737049.4 }
+  - match: { hits.hits.1._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] }
+  - match: { hits.hits.1.sort.0: 450359962737049.7 }
+  - match: { hits.hits.2._source.field: [ 900719925474099.1, 1.1 ] }
+  - match: { hits.hits.2.sort.0: 900719925474099.1 }
+  - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] }
+  - match: { hits.hits.3.sort.0: 900719925474099.2 }
+
+  - do:
+      search:
+        index: double_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: min, order: asc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: double_sort }
+  - match: { hits.hits.0._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] }
+  - match: { hits.hits.0.sort: [ -2.0 ] }
+  - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] }
+  - match: { hits.hits.1.sort.0: 1.1 }
+  - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] }
+  - match: { hits.hits.2.sort.0: 3.5 }
+  - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] }
+  - match: { hits.hits.3.sort.0: 900719925474099.2 }
+
+  - do:
+      search:
+        index: double_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: median, order: desc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: double_sort }
+  - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] }
+  - match: { hits.hits.0.sort.0: 900719925474099.2 }
+  - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] }
+  - match: { hits.hits.1.sort.0: 450359962737050.1 }
+  - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] }
+  - match: { hits.hits.2.sort.0: 4.6 }
+  - match: { hits.hits.3._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] }
+  - match: { hits.hits.3.sort.0: 1.95 }
+
+  - do:
+      search:
+        index: double_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: avg, order: asc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: double_sort }
+  - match: { hits.hits.0._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] }
+  - match: { hits.hits.0.sort.0: 112589990684262.89 }
+  - match: { hits.hits.1._source.field: [ 450359962737049.4, 3.5, 4.6 ] }
+  - match: { hits.hits.1.sort.0: 150119987579019.16 }
+  - match: { hits.hits.2._source.field: [ 900719925474099.1, 1.1 ] }
+  - match: { hits.hits.2.sort.0: 450359962737050.1 }
+  - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] }
+  - match: { hits.hits.3.sort.0: 900719925474099.2 }
+
+  - do:
+      search:
+        index: double_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: sum, order: desc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: double_sort }
+  - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] }
+  - match: { hits.hits.0.sort.0: 1801439850948198.5 }
+  - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] }
+  - match: { hits.hits.1.sort.0: 900719925474100.2 }
+  - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] }
+  - match: { hits.hits.2.sort.0: 450359962737057.5 }
+  - match: { hits.hits.3._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] }
+  - match: { hits.hits.3.sort.0: 450359962737051.56 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml
new file mode 100644
index 0000000000000..f354dff6cbf02
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml
@@ -0,0 +1,137 @@
+setup:
+  - do:
+      indices.create:
+          index: long_sort
+          body:
+            settings:
+              number_of_shards: 3
+              number_of_replicas: 0
+            mappings:
+              properties:
+                field:
+                  type: long
+
+---
+"test sorting against long only fields":
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{ "index" : { "_index" : "long_sort", "_id" : "1" } }'
+          - '{"field" : [ 9223372036854775807, 1 ] }'
+          - '{ "index" : { "_index" : "long_sort", "_id" : "2" } }'
+          - '{"field" : [ 922337203685477777, 2 ] }'
+          - '{ "index" : { "_index" : "long_sort", "_id" : "3" } }'
+          - '{"field" : [ 2147483647, 3, 4 ] }'
+          - '{ "index" : { "_index" : "long_sort", "_id" : "4" } }'
+          - '{"field" : [ 2147483648, 5, -1, -2 ] }'
+
+  - do:
+      search:
+        index: long_sort
+        body:
+          size: 5
+          sort: [{ field: { mode: max, order: desc } } ]
+  - match: {hits.total.value: 4 }
+  - length: {hits.hits: 4 }
+  - match: { hits.hits.0._index: long_sort }
+  - match: { hits.hits.0._source.field: [ 9223372036854775807, 1 ] }
+  - match: { hits.hits.0.sort.0: 9223372036854775807 }
+  - match: { hits.hits.1._source.field: [ 922337203685477777, 2 ] }
+  - match: { hits.hits.1.sort.0: 922337203685477777 }
+  - match: { hits.hits.2._source.field: [ 2147483648, 5, -1, -2 ] }
+  - match: { hits.hits.2.sort.0: 2147483648 }
+  - match: { hits.hits.3._source.field: [ 2147483647, 3, 4 ] }
+  - match: { hits.hits.3.sort.0: 2147483647 }
+
+  - do:
+      search:
+        index: long_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: max, order: asc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: long_sort }
+  - match: { hits.hits.0._source.field: [ 2147483647, 3, 4 ] }
+  - match: { hits.hits.0.sort.0: 2147483647 }
+  - match: { hits.hits.1._source.field: [ 2147483648, 5, -1, -2 ] }
+  - match: { hits.hits.1.sort.0: 2147483648 }
+  - match: { hits.hits.2._source.field: [ 922337203685477777, 2 ] }
+  - match: { hits.hits.2.sort.0: 922337203685477777 }
+  - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] }
+  - match: { hits.hits.3.sort.0: 9223372036854775807 }
+
+
+  - do:
+      search:
+        index: long_sort
+        body:
+          size: 5
+          sort: [{ field: { mode: min, order: desc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: long_sort }
+  - match: { hits.hits.0._source.field: [ 2147483647, 3, 4 ] }
+  - match: { hits.hits.0.sort.0: 3 }
+  - match: { hits.hits.1._source.field: [ 922337203685477777, 2 ] }
+  - match: { hits.hits.1.sort.0: 2 }
+  - match: { hits.hits.2._source.field: [ 9223372036854775807, 1 ] }
+  - match: { hits.hits.2.sort.0: 1 }
+  - match: { hits.hits.3._source.field: [ 2147483648, 5, -1, -2 ] }
+  - match: { hits.hits.3.sort: [ -2 ] }
+
+  - do:
+      search:
+        index: long_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: median, order: asc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: long_sort }
+  - match: { hits.hits.0._source.field: [ 2147483648, 5, -1, -2 ] }
+  - match: { hits.hits.0.sort.0: 2 }
+  - match: { hits.hits.1._source.field: [ 2147483647, 3, 4 ] }
+  - match: { hits.hits.1.sort.0: 4 }
+  - match: { hits.hits.2._source.field: [ 922337203685477777, 2 ] }
+  - match: { hits.hits.2.sort.0: 461168601842738880 }
+  - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] }
+  - match: { hits.hits.3.sort.0: 4611686018427387904 }
+
+  - do:
+      search:
+        index: long_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: avg, order: desc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: long_sort }
+  - match: { hits.hits.0._source.field: [ 922337203685477777, 2 ] }
+  - match: { hits.hits.0.sort.0: 461168601842738880 }
+  - match: { hits.hits.1._source.field: [ 2147483647, 3, 4 ] }
+  - match: { hits.hits.1.sort.0: 715827885 }
+  - match: { hits.hits.2._source.field: [ 2147483648, 5, -1, -2 ] }
+  - match: { hits.hits.2.sort.0: 536870913 }
+  - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] }
+  - match: { hits.hits.3.sort: [ -4611686018427387904 ] }
+
+  - do:
+      search:
+        index: long_sort
+        body:
+          size: 5
+          sort: [ { field: { mode: sum, order: asc } } ]
+  - match: { hits.total.value: 4 }
+  - length: { hits.hits: 4 }
+  - match: { hits.hits.0._index: long_sort }
+  - match: { hits.hits.0._source.field: [ 9223372036854775807, 1 ] }
+  - match: { hits.hits.0.sort: [ -9223372036854775808 ] }
+  - match: { hits.hits.1._source.field: [ 2147483648, 5, -1, -2 ] }
+  - match: { hits.hits.1.sort.0: 2147483650 }
+  - match: { hits.hits.2._source.field: [ 2147483647, 3, 4 ] }
+  - match: { hits.hits.2.sort.0: 2147483654 }
+  - match: { hits.hits.3._source.field: [ 922337203685477777, 2 ] }
+  - match: { hits.hits.3.sort.0: 922337203685477779 }
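The sum and avg expectations above follow plain two's-complement long arithmetic, so overflow wraps rather than erroring. Worked from the document holding [ 9223372036854775807, 1 ]:

    9223372036854775807 + 1  = -9223372036854775808   (wraps past Long.MAX_VALUE; the sum expectation above)
    -9223372036854775808 / 2 = -4611686018427387904   (the avg expectation above)
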
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml
new file mode 100644
index 0000000000000..056b2f58b2229
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml
@@ -0,0 +1,167 @@
+setup:
+  - do:
+      indices.create:
+          index: unsigned_long_sort
+          body:
+            settings:
+              number_of_shards: 3
+              number_of_replicas: 0
+            mappings:
+              properties:
+                field:
+                  type: unsigned_long
+
+---
+"test sorting against unsigned_long only fields":
+  - skip:
+      version:  " - 2.19.99"
+      reason:   "this change is added in 3.0.0"
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "1" } }'
+          - '{"field" : [ 13835058055282163712, 1 ] }'
+          - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "2" } }'
+          - '{"field" : [ 13835058055282163713, 13835058055282163714 ] }'
+          - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "3" } }'
+          - '{"field" : [ 13835058055282163715, 13835058055282163716, 2 ] }'
+          - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "4" } }'
+          - '{"field" : [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }'
+          - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "5" } }'
+          - '{"field" : [ 13835058055282163720, 13835058055282163721, 3, 4 ] }'
+          - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "6" } }'
+          - '{"field" : [ 13835058055282163722, 5, 6, 7 ] }'
+
+  - do:
+      search:
+        index: unsigned_long_sort
+        body:
+          size: 10
+          sort: [{ field: { mode: max, order: desc } } ]
+  - match: {hits.total.value: 6 }
+  - length: {hits.hits: 6 }
+  - match: { hits.hits.0._index: unsigned_long_sort }
+  - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] }
+  - match: { hits.hits.0.sort.0: 13835058055282163722 }
+  - match: { hits.hits.1._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] }
+  - match: { hits.hits.1.sort.0: 13835058055282163721 }
+  - match: { hits.hits.2._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }
+  - match: { hits.hits.2.sort.0: 13835058055282163719 }
+  - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] }
+  - match: { hits.hits.3.sort.0: 13835058055282163716 }
+  - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] }
+  - match: { hits.hits.4.sort.0: 13835058055282163714 }
+  - match: { hits.hits.5._source.field: [ 13835058055282163712, 1 ] }
+  - match: { hits.hits.5.sort.0: 13835058055282163712 }
+
+  - do:
+      search:
+        index: unsigned_long_sort
+        body:
+          size: 10
+          sort: [{ field: { mode: max, order: asc } } ]
+  - match: {hits.total.value: 6 }
+  - length: {hits.hits: 6 }
+  - match: { hits.hits.0._index: unsigned_long_sort }
+  - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] }
+  - match: { hits.hits.0.sort.0: 13835058055282163712 }
+  - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] }
+  - match: { hits.hits.1.sort.0: 13835058055282163714 }
+  - match: { hits.hits.2._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] }
+  - match: { hits.hits.2.sort.0: 13835058055282163716 }
+  - match: { hits.hits.3._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }
+  - match: { hits.hits.3.sort.0: 13835058055282163719 }
+  - match: { hits.hits.4._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] }
+  - match: { hits.hits.4.sort.0: 13835058055282163721 }
+  - match: { hits.hits.5._source.field: [ 13835058055282163722, 5, 6, 7 ] }
+  - match: { hits.hits.5.sort.0: 13835058055282163722 }
+
+  - do:
+      search:
+        index: unsigned_long_sort
+        body:
+          size: 10
+          sort: [ { field: { mode: median, order: asc } } ]
+  - match: { hits.total.value: 6 }
+  - length: { hits.hits: 6 }
+  - match: { hits.hits.0._index: unsigned_long_sort }
+  - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] }
+  - match: { hits.hits.0.sort.0: 7 }
+  - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] }
+  - match: { hits.hits.1.sort.0: 4611686018427387906 }
+  - match: { hits.hits.2._source.field: [ 13835058055282163712, 1 ] }
+  - match: { hits.hits.2.sort.0: 6917529027641081857 }
+  - match: { hits.hits.3._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] }
+  - match: { hits.hits.3.sort.0: 6917529027641081862 }
+  - match: { hits.hits.4._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] }
+  - match: { hits.hits.4.sort.0: 13835058055282163715 }
+  - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }
+  - match: { hits.hits.5.sort.0: 13835058055282163718 }
+
+  - do:
+      search:
+        index: unsigned_long_sort
+        body:
+          size: 10
+          sort: [ { field: { mode: sum, order: desc } } ]
+  - match: { hits.total.value: 6 }
+  - length: { hits.hits: 6 }
+  - match: { hits.hits.0._index: unsigned_long_sort }
+  - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] }
+  - match: { hits.hits.0.sort.0: 13835058055282163740 }
+  - match: { hits.hits.1._source.field: [ 13835058055282163712, 1 ] }
+  - match: { hits.hits.1.sort.0: 13835058055282163713 }
+  - match: { hits.hits.2._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] }
+  - match: { hits.hits.2.sort.0: 9223372036854775832 }
+  - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] }
+  - match: { hits.hits.3.sort.0: 9223372036854775817 }
+  - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] }
+  - match: { hits.hits.4.sort.0: 9223372036854775811 }
+  - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }
+  - match: { hits.hits.5.sort.0: 4611686018427387922 }
+
+  - do:
+      search:
+        index: unsigned_long_sort
+        body:
+          size: 10
+          sort: [ { field: { mode: avg, order: desc } } ]
+  - match: { hits.total.value: 6 }
+  - length: { hits.hits: 6 }
+  - match: { hits.hits.0._index: unsigned_long_sort }
+  - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] }
+  - match: { hits.hits.0.sort.0: 6917529027641081857 }
+  - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] }
+  - match: { hits.hits.1.sort.0: 4611686018427387906 }
+  - match: { hits.hits.2._source.field: [ 13835058055282163722, 5, 6, 7 ] }
+  - match: { hits.hits.2.sort.0: 3458764513820540935 }
+  - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] }
+  - match: { hits.hits.3.sort.0: 3074457345618258606 }
+  - match: { hits.hits.4._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] }
+  - match: { hits.hits.4.sort.0: 2305843009213693958 }
+  - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }
+  - match: { hits.hits.5.sort.0: 1537228672809129307 }
+
+  - do:
+      search:
+        index: unsigned_long_sort
+        body:
+          size: 10
+          sort: [ { field: { mode: min, order: asc } } ]
+  - match: { hits.total.value: 6 }
+  - length: { hits.hits: 6 }
+  - match: { hits.hits.0._index: unsigned_long_sort }
+  - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] }
+  - match: { hits.hits.0.sort.0: 1 }
+  - match: { hits.hits.1._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] }
+  - match: { hits.hits.1.sort.0: 2 }
+  - match: { hits.hits.2._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] }
+  - match: { hits.hits.2.sort.0: 3 }
+  - match: { hits.hits.3._source.field: [ 13835058055282163722, 5, 6, 7 ] }
+  - match: { hits.hits.3.sort.0: 5 }
+  - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] }
+  - match: { hits.hits.4.sort.0: 13835058055282163713 }
+  - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }
+  - match: { hits.hits.5.sort.0: 13835058055282163717 }
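Unlike the long tests above, the unsigned_long expectations are computed with unsigned 64-bit arithmetic: sums wrap modulo 2^64 and avg uses unsigned division rounded up. Worked from the indexed values:

    doc 5 sum: 13835058055282163720 + 13835058055282163721 + 3 + 4 = 27670116110564327448
               27670116110564327448 - 2^64 = 9223372036854775832                    (the sum expectation above)
    doc 1 avg: (13835058055282163712 + 1) / 2 = 6917529027641081856.5 -> 6917529027641081857   (rounded up)
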
diff --git a/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java b/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java
new file mode 100644
index 0000000000000..eb8d8f1667218
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java
@@ -0,0 +1,55 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.fielddata;
+
+import org.apache.lucene.index.SortedNumericDocValues;
+
+import java.io.IOException;
+
+/**
+ * Wraps long-based {@link SortedNumericDocValues} as unsigned long ones
+ * (primarily used by {@link org.opensearch.search.MultiValueMode})
+ *
+ * @opensearch.internal
+ */
+public final class LongToSortedNumericUnsignedLongValues extends SortedNumericUnsignedLongValues {
+    private final SortedNumericDocValues values;
+
+    public LongToSortedNumericUnsignedLongValues(SortedNumericDocValues values) {
+        this.values = values;
+    }
+
+    @Override
+    public boolean advanceExact(int target) throws IOException {
+        return values.advanceExact(target);
+    }
+
+    @Override
+    public long nextValue() throws IOException {
+        return values.nextValue();
+    }
+
+    @Override
+    public int docValueCount() {
+        return values.docValueCount();
+    }
+
+    public int advance(int target) throws IOException {
+        return values.advance(target);
+    }
+
+    public int docID() {
+        return values.docID();
+    }
+
+    /** Return the wrapped values. */
+    public SortedNumericDocValues getNumericUnsignedLongValues() {
+        return values;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java
new file mode 100644
index 0000000000000..fa4c5152b9f90
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.fielddata;
+
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.opensearch.common.annotation.PublicApi;
+
+import java.io.IOException;
+
+/**
+ * Clone of {@link SortedNumericDocValues} for unsigned long values.
+ *
+ * @opensearch.api
+ */
+@PublicApi(since = "2.19.0")
+public abstract class SortedNumericUnsignedLongValues {
+
+    /** Sole constructor. (For invocation by subclass
+     * constructors, typically implicit.) */
+    protected SortedNumericUnsignedLongValues() {}
+
+    /** Advance the iterator to exactly {@code target} and return whether
+     *  {@code target} has a value.
+     *  {@code target} must be greater than or equal to the current
+     *  doc ID and must be a valid doc ID, ie. &ge; 0 and
+     *  &lt; {@code maxDoc}.*/
+    public abstract boolean advanceExact(int target) throws IOException;
+
+    /**
+     * Iterates to the next value in the current document. Do not call this more than
+     * {@link #docValueCount} times for the document.
+     */
+    public abstract long nextValue() throws IOException;
+
+    /**
+     * Retrieves the number of values for the current document.  This must always
+     * be greater than zero.
+     * It is illegal to call this method after {@link #advanceExact(int)}
+     * returned {@code false}.
+     */
+    public abstract int docValueCount();
+
+    /**
+     * Advances to the first beyond the current whose document number is greater than or equal to
+     * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link
+     * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set.
+     *
+     * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks
+     * in and is implemented by most numeric types.
+     */
+    public int advance(int target) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    public abstract int docID();
+}
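A hedged usage sketch of the two classes above (the consumer code and the leafData variable are hypothetical; the wrapping itself mirrors what loadDocValues does in the comparator source patched next). The raw long returned by nextValue() carries the unsigned bit pattern, so it should be rendered or compared with the JDK's unsigned helpers:

    SortedNumericUnsignedLongValues values = new LongToSortedNumericUnsignedLongValues(leafData.getLongValues());
    if (values.advanceExact(docId)) {
        for (int i = 0; i < values.docValueCount(); i++) {
            long raw = values.nextValue();                   // unsigned value stored in a signed long
            System.out.println(Long.toUnsignedString(raw));  // e.g. 13835058055282163712
        }
    }
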
diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java
index 9db5817450cd0..6fc85bd0b2689 100644
--- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java
+++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java
@@ -10,7 +10,6 @@
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.LeafFieldComparator;
@@ -24,6 +23,8 @@
 import org.opensearch.index.fielddata.IndexFieldData;
 import org.opensearch.index.fielddata.IndexNumericFieldData;
 import org.opensearch.index.fielddata.LeafNumericFieldData;
+import org.opensearch.index.fielddata.LongToSortedNumericUnsignedLongValues;
+import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues;
 import org.opensearch.index.search.comparators.UnsignedLongComparator;
 import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.MultiValueMode;
@@ -57,14 +58,13 @@ public SortField.Type reducedType() {
         return SortField.Type.LONG;
     }
 
-    private SortedNumericDocValues loadDocValues(LeafReaderContext context) {
+    private SortedNumericUnsignedLongValues loadDocValues(LeafReaderContext context) {
         final LeafNumericFieldData data = indexFieldData.load(context);
-        SortedNumericDocValues values = data.getLongValues();
-        return values;
+        return new LongToSortedNumericUnsignedLongValues(data.getLongValues());
     }
 
     private NumericDocValues getNumericDocValues(LeafReaderContext context, BigInteger missingValue) throws IOException {
-        final SortedNumericDocValues values = loadDocValues(context);
+        final SortedNumericUnsignedLongValues values = loadDocValues(context);
         if (nested == null) {
             return FieldData.replaceMissing(sortMode.select(values), missingValue);
         }
diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java
index a99da674836f2..fa2e776eca67a 100644
--- a/server/src/main/java/org/opensearch/search/MultiValueMode.java
+++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java
@@ -42,6 +42,7 @@
 import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.opensearch.common.Numbers;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -50,9 +51,11 @@
 import org.opensearch.index.fielddata.AbstractNumericDocValues;
 import org.opensearch.index.fielddata.AbstractSortedDocValues;
 import org.opensearch.index.fielddata.FieldData;
+import org.opensearch.index.fielddata.LongToSortedNumericUnsignedLongValues;
 import org.opensearch.index.fielddata.NumericDoubleValues;
 import org.opensearch.index.fielddata.SortedBinaryDocValues;
 import org.opensearch.index.fielddata.SortedNumericDoubleValues;
+import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues;
 
 import java.io.IOException;
 import java.util.Locale;
@@ -143,6 +146,44 @@ protected double pick(
 
             return totalCount > 0 ? totalValue : missingValue;
         }
+
+        @Override
+        protected long pick(SortedNumericUnsignedLongValues values) throws IOException {
+            final int count = values.docValueCount();
+            long total = 0;
+            for (int index = 0; index < count; ++index) {
+                total += values.nextValue();
+            }
+            return total;
+        }
+
+        @Override
+        protected long pick(
+            SortedNumericUnsignedLongValues values,
+            long missingValue,
+            DocIdSetIterator docItr,
+            int startDoc,
+            int endDoc,
+            int maxChildren
+        ) throws IOException {
+            int totalCount = 0;
+            long totalValue = 0;
+            int count = 0;
+            for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
+                if (values.advanceExact(doc)) {
+                    if (++count > maxChildren) {
+                        break;
+                    }
+
+                    final int docCount = values.docValueCount();
+                    for (int index = 0; index < docCount; ++index) {
+                        totalValue += values.nextValue();
+                    }
+                    totalCount += docCount;
+                }
+            }
+            return totalCount > 0 ? totalValue : missingValue;
+        }
     },
 
     /**
@@ -228,6 +269,46 @@ protected double pick(
             }
             return totalValue / totalCount;
         }
+
+        @Override
+        protected long pick(SortedNumericUnsignedLongValues values) throws IOException {
+            final int count = values.docValueCount();
+            long total = 0;
+            for (int index = 0; index < count; ++index) {
+                total += values.nextValue();
+            }
+            return count > 1 ? divideUnsignedAndRoundUp(total, count) : total;
+        }
+
+        @Override
+        protected long pick(
+            SortedNumericUnsignedLongValues values,
+            long missingValue,
+            DocIdSetIterator docItr,
+            int startDoc,
+            int endDoc,
+            int maxChildren
+        ) throws IOException {
+            int totalCount = 0;
+            long totalValue = 0;
+            int count = 0;
+            for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
+                if (values.advanceExact(doc)) {
+                    if (++count > maxChildren) {
+                        break;
+                    }
+                    final int docCount = values.docValueCount();
+                    for (int index = 0; index < docCount; ++index) {
+                        totalValue += values.nextValue();
+                    }
+                    totalCount += docCount;
+                }
+            }
+            if (totalCount < 1) {
+                return missingValue;
+            }
+            return totalCount > 1 ? divideUnsignedAndRoundUp(totalValue, totalCount) : totalValue;
+        }
     },
 
     /**
@@ -259,6 +340,45 @@ protected double pick(SortedNumericDoubleValues values) throws IOException {
                 return values.nextValue();
             }
         }
+
+        @Override
+        protected long pick(SortedNumericUnsignedLongValues values) throws IOException {
+            int count = values.docValueCount();
+            long firstValue = values.nextValue();
+            if (count == 1) {
+                return firstValue;
+            } else if (count == 2) {
+                long total = firstValue + values.nextValue();
+                return (total >>> 1) + (total & 1);
+            } else if (firstValue >= 0) {
+                for (int i = 1; i < (count - 1) / 2; ++i) {
+                    values.nextValue();
+                }
+                if (count % 2 == 0) {
+                    long total = values.nextValue() + values.nextValue();
+                    return (total >>> 1) + (total & 1);
+                } else {
+                    return values.nextValue();
+                }
+            }
+
+            final long[] docValues = new long[count];
+            docValues[0] = firstValue;
+            int firstPositiveIndex = 0;
+            for (int i = 1; i < count; ++i) {
+                docValues[i] = values.nextValue();
+                if (docValues[i] >= 0 && firstPositiveIndex == 0) {
+                    firstPositiveIndex = i;
+                }
+            }
+            final int mid = ((count - 1) / 2 + firstPositiveIndex) % count;
+            if (count % 2 == 0) {
+                long total = docValues[mid] + docValues[(mid + 1) % count];
+                return (total >>> 1) + (total & 1);
+            } else {
+                return docValues[mid];
+            }
+        }
     },
 
     /**
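The MEDIAN branch above relies on doc values arriving in ascending signed order, while unsigned order puts all non-negative signed values before the values with the sign bit set (those >= 2^63); locating firstPositiveIndex and rotating by it therefore recovers the unsigned order. A small worked example (values chosen for illustration):

    unsigned values    {1, 3, 2^63 + 5}
    stored as longs    [-9223372036854775803, 1, 3]          (signed-ascending doc-value order)
    firstPositiveIndex = 1
    mid = ((3 - 1) / 2 + 1) % 3 = 2  ->  docValues[2] = 3    (the unsigned median)
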
@@ -382,6 +502,47 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc
 
             return hasValue ? ord : -1;
         }
+
+        @Override
+        protected long pick(SortedNumericUnsignedLongValues values) throws IOException {
+            final int count = values.docValueCount();
+            final long min = values.nextValue();
+            if (count == 1 || min > 0) {
+                return min;
+            }
+            for (int i = 1; i < count; ++i) {
+                long val = values.nextValue();
+                if (val >= 0) {
+                    return val;
+                }
+            }
+            return min;
+        }
+
+        @Override
+        protected long pick(
+            SortedNumericUnsignedLongValues values,
+            long missingValue,
+            DocIdSetIterator docItr,
+            int startDoc,
+            int endDoc,
+            int maxChildren
+        ) throws IOException {
+            boolean hasValue = false;
+            long minValue = Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG;
+            int count = 0;
+            for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
+                if (values.advanceExact(doc)) {
+                    if (++count > maxChildren) {
+                        break;
+                    }
+                    final long docMin = pick(values);
+                    minValue = Long.compareUnsigned(docMin, minValue) < 0 ? docMin : minValue;
+                    hasValue = true;
+                }
+            }
+            return hasValue ? minValue : missingValue;
+        }
     },
 
     /**
@@ -525,6 +686,46 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc
             }
             return ord;
         }
+
+        @Override
+        protected long pick(SortedNumericUnsignedLongValues values) throws IOException {
+            final int count = values.docValueCount();
+            long max = values.nextValue();
+            long val;
+            for (int i = 1; i < count; ++i) {
+                val = values.nextValue();
+                if (max < 0 && val >= 0) {
+                    return max;
+                }
+                max = val;
+            }
+            return max;
+        }
+
+        @Override
+        protected long pick(
+            SortedNumericUnsignedLongValues values,
+            long missingValue,
+            DocIdSetIterator docItr,
+            int startDoc,
+            int endDoc,
+            int maxChildren
+        ) throws IOException {
+            boolean hasValue = false;
+            long maxValue = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG;
+            int count = 0;
+            for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
+                if (values.advanceExact(doc)) {
+                    if (++count > maxChildren) {
+                        break;
+                    }
+                    final long docMax = pick(values);
+                    maxValue = Long.compareUnsigned(maxValue, docMax) < 0 ? docMax : maxValue;
+                    hasValue = true;
+                }
+            }
+            return hasValue ? maxValue : missingValue;
+        }
     };
 
     /**
@@ -1032,6 +1233,126 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc
         throw new IllegalArgumentException("Unsupported sort mode: " + this);
     }
 
+    /**
+     * Return a {@link NumericDocValues} instance that can be used to sort documents
+     * with this mode and the provided values. When a document has no value,
+     * the returned instance reports that no value is present.
+     * <p>
+     * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX
+     */
+    public NumericDocValues select(final SortedNumericUnsignedLongValues values) {
+        SortedNumericDocValues sortedNumericDocValues = null;
+        if (values instanceof LongToSortedNumericUnsignedLongValues) {
+            sortedNumericDocValues = ((LongToSortedNumericUnsignedLongValues) values).getNumericUnsignedLongValues();
+        }
+
+        final NumericDocValues singleton = DocValues.unwrapSingleton(sortedNumericDocValues);
+        if (singleton != null) {
+            return singleton;
+        } else {
+            return new AbstractNumericDocValues() {
+
+                private long value;
+
+                @Override
+                public boolean advanceExact(int target) throws IOException {
+                    if (values.advanceExact(target)) {
+                        value = pick(values);
+                        return true;
+                    }
+                    return false;
+                }
+
+                @Override
+                public int docID() {
+                    return values.docID();
+                }
+
+                @Override
+                public long longValue() throws IOException {
+                    return value;
+                }
+            };
+        }
+    }
+
+    protected long pick(SortedNumericUnsignedLongValues values) throws IOException {
+        throw new IllegalArgumentException("Unsupported sort mode: " + this);
+    }
+
+    /**
+     * Return a {@link NumericDocValues} instance that can be used to sort root documents
+     * with this mode, the provided values and filters for root/inner documents.
+     * <p>
+     * For every root document, the values of its inner documents will be aggregated.
+     * <p>
+     * Allowed Modes: SUM, AVG, MIN, MAX
+     * <p>
+     * NOTE: Calling the returned instance on docs that are not root docs is illegal.
+     *       The returned instance can only be used to evaluate the current and upcoming docs.
+     */
+    public NumericDocValues select(
+        final SortedNumericUnsignedLongValues values,
+        final long missingValue,
+        final BitSet parentDocs,
+        final DocIdSetIterator childDocs,
+        int maxDoc,
+        int maxChildren
+    ) throws IOException {
+        if (parentDocs == null || childDocs == null) {
+            return FieldData.replaceMissing(DocValues.emptyNumeric(), missingValue);
+        }
+
+        return new AbstractNumericDocValues() {
+
+            int lastSeenParentDoc = -1;
+            long lastEmittedValue = missingValue;
+
+            @Override
+            public boolean advanceExact(int parentDoc) throws IOException {
+                assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming parent docs";
+                if (parentDoc == lastSeenParentDoc) {
+                    return true;
+                } else if (parentDoc == 0) {
+                    lastEmittedValue = missingValue;
+                    return true;
+                }
+                final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
+                final int firstChildDoc;
+                if (childDocs.docID() > prevParentDoc) {
+                    firstChildDoc = childDocs.docID();
+                } else {
+                    firstChildDoc = childDocs.advance(prevParentDoc + 1);
+                }
+
+                lastSeenParentDoc = parentDoc;
+                lastEmittedValue = pick(values, missingValue, childDocs, firstChildDoc, parentDoc, maxChildren);
+                return true;
+            }
+
+            @Override
+            public int docID() {
+                return lastSeenParentDoc;
+            }
+
+            @Override
+            public long longValue() {
+                return lastEmittedValue;
+            }
+        };
+    }
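
To make the parent/child walk in advanceExact easier to follow, here is a hedged analogue using java.util.BitSet instead of Lucene's BitSet and DocIdSetIterator (illustration only, not patch code): the children of a parent are the set bits after the previous parent and before the parent itself.

    import java.util.BitSet;

    public final class ParentChildRangeSketch {
        public static void main(String[] args) {
            BitSet parentDocs = new BitSet();
            parentDocs.set(3);
            parentDocs.set(7);                  // parents at doc ids 3 and 7
            BitSet childDocs = new BitSet();
            childDocs.set(0, 3);                // children 0..2 belong to parent 3
            childDocs.set(4, 7);                // children 4..6 belong to parent 7

            int parentDoc = 7;
            int prevParentDoc = parentDocs.previousSetBit(parentDoc - 1);  // -> 3
            int firstChildDoc = childDocs.nextSetBit(prevParentDoc + 1);   // -> 4
            System.out.println("children of " + parentDoc + ": [" + firstChildDoc + ", " + parentDoc + ")");
        }
    }
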
+
+    protected long pick(
+        SortedNumericUnsignedLongValues values,
+        long missingValue,
+        DocIdSetIterator docItr,
+        int startDoc,
+        int endDoc,
+        int maxChildren
+    ) throws IOException {
+        throw new IllegalArgumentException("Unsupported sort mode: " + this);
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeEnum(this);
@@ -1040,4 +1361,16 @@ public void writeTo(StreamOutput out) throws IOException {
     public static MultiValueMode readMultiValueModeFrom(StreamInput in) throws IOException {
         return in.readEnum(MultiValueMode.class);
     }
+
+    /**
+     * Copied from {@link Long#divideUnsigned(long, long)} and {@link Long#remainderUnsigned(long, long)}
+     */
+    private static long divideUnsignedAndRoundUp(long dividend, long divisor) {
+        assert divisor > 0;
+        final long q = (dividend >>> 1) / divisor << 1;
+        final long r = dividend - q * divisor;
+        final long quotient = q + ((r | ~(r - divisor)) >>> (Long.SIZE - 1));
+        final long rem = r - ((~(r - divisor) >> (Long.SIZE - 1)) & divisor);
+        return quotient + Math.round((double) rem / divisor);
+    }
 }
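
A quick sanity check on the rounding helper: the following hedged, standalone sketch (hypothetical class name, not part of the patch) reproduces divideUnsignedAndRoundUp with the JDK methods it cites, under the assumption that the divisor is a positive document count:

    public final class UnsignedAvgSketch {
        // Same result as divideUnsignedAndRoundUp: unsigned quotient, then round half up.
        static long unsignedAverageRoundedUp(long dividend, long divisor) {
            long q = Long.divideUnsigned(dividend, divisor);
            long r = Long.remainderUnsigned(dividend, divisor);
            return q + ((r * 2 >= divisor) ? 1 : 0);   // r < divisor (a doc count), so r * 2 cannot overflow here
        }

        public static void main(String[] args) {
            // -2L is 2^64 - 2 as an unsigned value; a signed average over 3 docs would go negative.
            System.out.println(Long.toUnsignedString(unsignedAverageRoundedUp(-2L, 3)));
            // prints 6148914691236517205
        }
    }
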
diff --git a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java
index 948d2cffceabe..e011dd0bcf6c0 100644
--- a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java
+++ b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java
@@ -41,6 +41,7 @@
 import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
+import org.opensearch.common.Numbers;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.index.fielddata.AbstractBinaryDocValues;
@@ -52,9 +53,13 @@
 import org.opensearch.index.fielddata.NumericDoubleValues;
 import org.opensearch.index.fielddata.SortedBinaryDocValues;
 import org.opensearch.index.fielddata.SortedNumericDoubleValues;
+import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.math.RoundingMode;
 import java.util.Arrays;
 
 import static org.hamcrest.Matchers.equalTo;
@@ -776,6 +781,96 @@ public int docValueCount() {
         verifySortedSet(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs));
     }
 
+    public void testSingleValuedUnsignedLongs() throws Exception {
+        final int numDocs = scaledRandomIntBetween(1, 100);
+        final long[] array = new long[numDocs];
+        final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs);
+        for (int i = 0; i < array.length; ++i) {
+            if (randomBoolean()) {
+                array[i] = randomUnsignedLong().longValue();
+                if (docsWithValue != null) {
+                    docsWithValue.set(i);
+                }
+            } else if (docsWithValue != null && randomBoolean()) {
+                docsWithValue.set(i);
+            }
+        }
+
+        final Supplier<SortedNumericUnsignedLongValues> multiValues = () -> new SortedNumericUnsignedLongValues() {
+            int docId = -1;
+
+            @Override
+            public boolean advanceExact(int target) throws IOException {
+                this.docId = target;
+                return docsWithValue == null || docsWithValue.get(docId);
+            }
+
+            @Override
+            public int docID() {
+                return docId;
+            }
+
+            @Override
+            public long nextValue() {
+                return array[docId];
+            }
+
+            @Override
+            public int docValueCount() {
+                return 1;
+            }
+        };
+        verifySortedUnsignedLong(multiValues, numDocs);
+        final FixedBitSet rootDocs = randomRootDocs(numDocs);
+        final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+        verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, Integer.MAX_VALUE);
+        verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs));
+    }
+
+    public void testMultiValuedUnsignedLongs() throws Exception {
+        final int numDocs = scaledRandomIntBetween(1, 100);
+        final long[][] array = new long[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            final long[] values = new long[randomInt(4)];
+            for (int j = 0; j < values.length; ++j) {
+                values[j] = randomUnsignedLong().longValue();
+            }
+            Arrays.sort(values);
+            array[i] = values;
+        }
+        final Supplier<SortedNumericUnsignedLongValues> multiValues = () -> new SortedNumericUnsignedLongValues() {
+            int doc;
+            int i;
+
+            @Override
+            public long nextValue() {
+                return array[doc][i++];
+            }
+
+            @Override
+            public boolean advanceExact(int doc) {
+                this.doc = doc;
+                i = 0;
+                return array[doc].length > 0;
+            }
+
+            @Override
+            public int docValueCount() {
+                return array[doc].length;
+            }
+
+            @Override
+            public int docID() {
+                return doc;
+            }
+        };
+        verifySortedUnsignedLong(multiValues, numDocs);
+        final FixedBitSet rootDocs = randomRootDocs(numDocs);
+        final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+        verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, Integer.MAX_VALUE);
+        verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs));
+    }
+
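
The tests above carry unsigned 64-bit values in plain longs (randomUnsignedLong().longValue()) and widen them back via Numbers.toUnsignedBigInteger; a hedged standalone sketch (hypothetical class name, not part of the patch) of that representation:

    import java.math.BigInteger;

    public final class UnsignedBitsSketch {
        public static void main(String[] args) {
            BigInteger unsigned = BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE); // 2^64 - 1
            long asBits = unsigned.longValue();                                          // -1L: raw bits only
            BigInteger roundTrip = new BigInteger(Long.toUnsignedString(asBits));        // back to 2^64 - 1
            System.out.println(asBits + " carries " + roundTrip);
        }
    }
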
     private void verifySortedSet(Supplier<SortedSetDocValues> supplier, int maxDoc) throws IOException {
         for (MultiValueMode mode : new MultiValueMode[] { MultiValueMode.MIN, MultiValueMode.MAX }) {
             SortedSetDocValues values = supplier.get();
@@ -857,6 +952,141 @@ private void verifySortedSet(
         }
     }
 
+    private void verifySortedUnsignedLong(Supplier<SortedNumericUnsignedLongValues> supplier, int maxDoc) throws IOException {
+        for (MultiValueMode mode : MultiValueMode.values()) {
+            SortedNumericUnsignedLongValues values = supplier.get();
+            final NumericDocValues selected = mode.select(values);
+            for (int i = 0; i < maxDoc; ++i) {
+                Long actual = null;
+                if (selected.advanceExact(i)) {
+                    actual = selected.longValue();
+                    verifyLongValueCanCalledMoreThanOnce(selected, actual);
+                }
+
+                BigInteger expected = null;
+                if (values.advanceExact(i)) {
+                    int numValues = values.docValueCount();
+                    if (mode == MultiValueMode.MAX) {
+                        expected = Numbers.MIN_UNSIGNED_LONG_VALUE;
+                    } else if (mode == MultiValueMode.MIN) {
+                        expected = Numbers.MAX_UNSIGNED_LONG_VALUE;
+                    } else {
+                        expected = BigInteger.ZERO;
+                    }
+                    for (int j = 0; j < numValues; ++j) {
+                        if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+                            expected = expected.add(Numbers.toUnsignedBigInteger(values.nextValue()));
+                        } else if (mode == MultiValueMode.MIN) {
+                            expected = expected.min(Numbers.toUnsignedBigInteger(values.nextValue()));
+                        } else if (mode == MultiValueMode.MAX) {
+                            expected = expected.max(Numbers.toUnsignedBigInteger(values.nextValue()));
+                        }
+                    }
+                    if (mode == MultiValueMode.AVG) {
+                        expected = Numbers.toUnsignedBigInteger(expected.longValue());
+                        expected = numValues > 1
+                            ? new BigDecimal(expected).divide(new BigDecimal(numValues), RoundingMode.HALF_UP).toBigInteger()
+                            : expected;
+                    } else if (mode == MultiValueMode.MEDIAN) {
+                        final Long[] docValues = new Long[numValues];
+                        for (int j = 0; j < numValues; ++j) {
+                            docValues[j] = values.nextValue();
+                        }
+                        Arrays.sort(docValues, Long::compareUnsigned);
+                        int value = numValues / 2;
+                        if (numValues % 2 == 0) {
+                            expected = Numbers.toUnsignedBigInteger(docValues[value - 1])
+                                .add(Numbers.toUnsignedBigInteger(docValues[value]));
+                            expected = Numbers.toUnsignedBigInteger(expected.longValue());
+                            expected = new BigDecimal(expected).divide(new BigDecimal(2), RoundingMode.HALF_UP).toBigInteger();
+                        } else {
+                            expected = Numbers.toUnsignedBigInteger(docValues[value]);
+                        }
+                    }
+                }
+
+                final Long expectedLong = expected == null ? null : expected.longValue();
+                assertEquals(mode.toString() + " docId=" + i, expectedLong, actual);
+            }
+        }
+    }
+
+    private void verifySortedUnsignedLong(
+        Supplier<SortedNumericUnsignedLongValues> supplier,
+        int maxDoc,
+        FixedBitSet rootDocs,
+        FixedBitSet innerDocs,
+        int maxChildren
+    ) throws IOException {
+        for (long missingValue : new long[] { 0, randomUnsignedLong().longValue() }) {
+            for (MultiValueMode mode : new MultiValueMode[] {
+                MultiValueMode.MIN,
+                MultiValueMode.MAX,
+                MultiValueMode.SUM,
+                MultiValueMode.AVG }) {
+                SortedNumericUnsignedLongValues values = supplier.get();
+                final NumericDocValues selected = mode.select(
+                    values,
+                    missingValue,
+                    rootDocs,
+                    new BitSetIterator(innerDocs, 0L),
+                    maxDoc,
+                    maxChildren
+                );
+                int prevRoot = -1;
+                for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? rootDocs.nextSetBit(root + 1) : -1) {
+                    assertTrue(selected.advanceExact(root));
+                    final long actual = selected.longValue();
+                    verifyLongValueCanCalledMoreThanOnce(selected, actual);
+
+                    BigInteger expected = BigInteger.ZERO;
+                    if (mode == MultiValueMode.MAX) {
+                        expected = Numbers.MIN_UNSIGNED_LONG_VALUE;
+                    } else if (mode == MultiValueMode.MIN) {
+                        expected = Numbers.MAX_UNSIGNED_LONG_VALUE;
+                    }
+                    int numValues = 0;
+                    int count = 0;
+                    for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit(
+                        child + 1
+                    )) {
+                        if (values.advanceExact(child)) {
+                            if (++count > maxChildren) {
+                                break;
+                            }
+                            for (int j = 0; j < values.docValueCount(); ++j) {
+                                if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+                                    expected = expected.add(Numbers.toUnsignedBigInteger(values.nextValue()));
+                                } else if (mode == MultiValueMode.MIN) {
+                                    expected = expected.min(Numbers.toUnsignedBigInteger(values.nextValue()));
+                                } else if (mode == MultiValueMode.MAX) {
+                                    expected = expected.max(Numbers.toUnsignedBigInteger(values.nextValue()));
+                                }
+                                ++numValues;
+                            }
+                        }
+                    }
+                    final long expectedLong;
+                    if (numValues == 0) {
+                        expectedLong = missingValue;
+                    } else if (mode == MultiValueMode.AVG) {
+                        expected = Numbers.toUnsignedBigInteger(expected.longValue());
+                        expected = numValues > 1
+                            ? new BigDecimal(expected).divide(new BigDecimal(numValues), RoundingMode.HALF_UP).toBigInteger()
+                            : expected;
+                        expectedLong = expected.longValue();
+                    } else {
+                        expectedLong = expected.longValue();
+                    }
+
+                    assertEquals(mode.toString() + " docId=" + root, expectedLong, actual);
+
+                    prevRoot = root;
+                }
+            }
+        }
+    }
+
     public void testValidOrdinals() {
         assertThat(MultiValueMode.SUM.ordinal(), equalTo(0));
         assertThat(MultiValueMode.AVG.ordinal(), equalTo(1));

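One hedged aside before the next patch (illustration only, not patch code): the expectations above sort with Long::compareUnsigned, which treats longs with the sign bit set as the largest values; that is the ordering the new MIN/MAX picks are written against.

    import java.util.Arrays;

    public final class UnsignedOrderSketch {
        public static void main(String[] args) {
            Long[] values = { 0L, 5L, -1L, Long.MIN_VALUE };
            Arrays.sort(values, Long::compareUnsigned);
            for (long v : values) {
                System.out.println(Long.toUnsignedString(v));
            }
            // prints 0, 5, 9223372036854775808 (2^63), 18446744073709551615 (2^64 - 1)
        }
    }
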
From 9bb1fbe2d615602971cb786d06ff80ba377d1c7f Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Fri, 10 Jan 2025 12:39:17 -0500
Subject: [PATCH 34/61] Update Gradle to 8.12 (#16884)

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
---
 build.gradle                                  |  8 +--
 buildSrc/build.gradle                         |  2 +-
 .../org/opensearch/gradle/NoticeTask.groovy   | 16 +++--
 .../gradle/plugin/PluginBuildPlugin.groovy    |  4 +-
 .../precommit/LicenseHeadersTask.groovy       | 11 +++-
 .../opensearch/gradle/test/AntFixture.groovy  | 11 +++-
 .../org/opensearch/gradle/EmptyDirTask.java   |  9 ++-
 .../ExportOpenSearchBuildResourcesTask.java   |  8 ++-
 .../org/opensearch/gradle/LoggedExec.java     |  6 +-
 .../gradle/docker/DockerBuildTask.java        | 13 ++--
 .../precommit/DependencyLicensesTask.java     | 17 ++++-
 .../gradle/precommit/FilePermissionsTask.java | 15 +++--
 .../precommit/ForbiddenPatternsTask.java      | 17 +++--
 .../gradle/precommit/JarHellTask.java         | 11 +++-
 .../gradle/precommit/LoggerUsageTask.java     | 15 +++--
 .../gradle/precommit/PomValidationTask.java   | 11 +++-
 .../gradle/precommit/PrecommitTask.java       | 11 +++-
 .../precommit/TestingConventionsTasks.java    | 66 +++++++++----------
 .../gradle/precommit/ThirdPartyAuditTask.java | 45 ++++++++-----
 .../test/ErrorReportingTestListener.java      |  4 ++
 .../gradle/test/GradleDistroTestTask.java     | 18 +++--
 .../gradle/test/RestIntegTestTask.java        | 13 +++-
 .../gradle/test/RestTestBasePlugin.java       |  2 +-
 .../org/opensearch/gradle/test/TestTask.java  | 12 +++-
 .../gradle/test/rest/CopyRestApiTask.java     | 17 +++--
 .../gradle/test/rest/CopyRestTestsTask.java   | 15 +++--
 .../StandaloneRestIntegTestTask.java          |  8 ++-
 .../testclusters/TestClustersAware.java       |  8 ++-
 .../testfixtures/TestFixturesPlugin.java      |  2 +-
 .../gradle/vagrant/VagrantShellTask.java      | 20 ++++--
 .../build.gradle                              |  6 +-
 distribution/build.gradle                     |  2 +-
 distribution/docker/build.gradle              |  6 +-
 distribution/packages/build.gradle            | 54 +++++++--------
 doc-tools/build.gradle                        |  4 +-
 doc-tools/missing-doclet/build.gradle         |  4 +-
 gradle/ide.gradle                             |  2 +-
 gradle/missing-javadoc.gradle                 | 13 +++-
 gradle/wrapper/gradle-wrapper.properties      |  4 +-
 libs/common/build.gradle                      |  2 +-
 modules/aggs-matrix-stats/build.gradle        |  4 +-
 modules/analysis-common/build.gradle          |  4 +-
 modules/build.gradle                          |  2 +-
 modules/cache-common/build.gradle             |  4 +-
 modules/geo/build.gradle                      |  4 +-
 modules/ingest-common/build.gradle            |  4 +-
 modules/ingest-geoip/build.gradle             |  4 +-
 modules/ingest-user-agent/build.gradle        |  4 +-
 modules/lang-expression/build.gradle          |  4 +-
 modules/lang-mustache/build.gradle            |  4 +-
 modules/lang-painless/build.gradle            |  4 +-
 modules/mapper-extras/build.gradle            |  4 +-
 modules/opensearch-dashboards/build.gradle    |  4 +-
 modules/parent-join/build.gradle              |  4 +-
 modules/percolator/build.gradle               |  4 +-
 modules/rank-eval/build.gradle                |  4 +-
 modules/reindex/build.gradle                  |  4 +-
 modules/repository-url/build.gradle           |  6 +-
 modules/search-pipeline-common/build.gradle   |  4 +-
 modules/systemd/build.gradle                  |  4 +-
 modules/transport-netty4/build.gradle         |  4 +-
 plugins/analysis-icu/build.gradle             |  4 +-
 plugins/analysis-kuromoji/build.gradle        |  4 +-
 plugins/analysis-nori/build.gradle            |  4 +-
 plugins/analysis-phonenumber/build.gradle     |  4 +-
 plugins/analysis-phonetic/build.gradle        |  4 +-
 plugins/analysis-smartcn/build.gradle         |  4 +-
 plugins/analysis-stempel/build.gradle         |  4 +-
 plugins/analysis-ukrainian/build.gradle       |  4 +-
 plugins/build.gradle                          |  6 +-
 plugins/cache-ehcache/build.gradle            |  4 +-
 plugins/crypto-kms/build.gradle               |  4 +-
 plugins/discovery-azure-classic/build.gradle  |  4 +-
 plugins/discovery-ec2/build.gradle            |  4 +-
 .../discovery-ec2/qa/amazon-ec2/build.gradle  |  6 +-
 plugins/discovery-gce/build.gradle            |  7 +-
 plugins/discovery-gce/qa/gce/build.gradle     |  4 +-
 plugins/examples/custom-settings/build.gradle | 10 +--
 .../build.gradle                              | 10 +--
 .../examples/custom-suggester/build.gradle    | 10 +--
 .../examples/painless-allowlist/build.gradle  | 10 +--
 plugins/examples/rescore/build.gradle         | 10 +--
 plugins/examples/rest-handler/build.gradle    | 12 ++--
 .../script-expert-scoring/build.gradle        | 10 +--
 plugins/identity-shiro/build.gradle           | 10 +--
 plugins/ingest-attachment/build.gradle        |  4 +-
 plugins/mapper-annotated-text/build.gradle    |  4 +-
 plugins/mapper-murmur3/build.gradle           |  4 +-
 plugins/mapper-size/build.gradle              |  4 +-
 plugins/repository-azure/build.gradle         |  4 +-
 plugins/repository-gcs/build.gradle           |  4 +-
 plugins/repository-hdfs/build.gradle          | 14 ++--
 plugins/repository-s3/build.gradle            |  4 +-
 plugins/store-smb/build.gradle                |  4 +-
 plugins/telemetry-otel/build.gradle           |  4 +-
 plugins/transport-grpc/build.gradle           |  4 +-
 plugins/transport-nio/build.gradle            |  4 +-
 plugins/transport-reactor-netty4/build.gradle |  4 +-
 plugins/workload-management/build.gradle      |  4 +-
 qa/die-with-dignity/build.gradle              |  4 +-
 qa/full-cluster-restart/build.gradle          |  4 +-
 qa/mixed-cluster/build.gradle                 |  2 +-
 qa/multi-cluster-search/build.gradle          |  2 +-
 qa/remote-clusters/build.gradle               |  2 +-
 qa/repository-multi-version/build.gradle      |  8 +--
 qa/rolling-upgrade/build.gradle               |  8 +--
 qa/smoke-test-multinode/build.gradle          |  2 +-
 qa/verify-version-constants/build.gradle      |  2 +-
 sandbox/plugins/build.gradle                  |  6 +-
 server/build.gradle                           |  2 +-
 test/external-modules/build.gradle            |  6 +-
 .../delayed-aggs/build.gradle                 |  4 +-
 test/fixtures/azure-fixture/build.gradle      |  2 +-
 test/fixtures/gcs-fixture/build.gradle        |  2 +-
 test/fixtures/s3-fixture/build.gradle         |  2 +-
 115 files changed, 521 insertions(+), 352 deletions(-)

diff --git a/build.gradle b/build.gradle
index f720b46bec143..679f7b9299248 100644
--- a/build.gradle
+++ b/build.gradle
@@ -127,8 +127,8 @@ subprojects {
           name = 'Snapshots'
           url = 'https://aws.oss.sonatype.org/content/repositories/snapshots'
           credentials {
-            username "$System.env.SONATYPE_USERNAME"
-            password "$System.env.SONATYPE_PASSWORD"
+            username = "$System.env.SONATYPE_USERNAME"
+            password = "$System.env.SONATYPE_PASSWORD"
           }
         }
       }
@@ -420,7 +420,7 @@ allprojects {
 gradle.projectsEvaluated {
   allprojects {
     project.tasks.withType(JavaForkOptions) {
-      maxHeapSize project.property('options.forkOptions.memoryMaximumSize')
+      maxHeapSize = project.property('options.forkOptions.memoryMaximumSize')
     }
 
     if (project.path == ':test:framework') {
@@ -736,7 +736,7 @@ tasks.named(JavaBasePlugin.CHECK_TASK_NAME) {
 }
 
 tasks.register('checkCompatibility', CheckCompatibilityTask) {
-  description('Checks the compatibility with child components')
+  description = 'Checks the compatibility with child components'
 }
 
 allprojects { project ->
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index c62f20e106e8c..f7fc0d7760993 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -106,7 +106,7 @@ dependencies {
   api "org.apache.commons:commons-compress:${props.getProperty('commonscompress')}"
   api 'org.apache.ant:ant:1.10.14'
   api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0'
-  api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0'
+  api 'com.netflix.nebula:nebula-publishing-plugin:21.1.0'
   api 'com.netflix.nebula:gradle-info-plugin:12.1.6'
   api 'org.apache.rat:apache-rat:0.15'
   api "commons-io:commons-io:${props.getProperty('commonsio')}"
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy
index 7b3a0fc01ab65..6a7a011d08dc4 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy
@@ -30,6 +30,7 @@
 package org.opensearch.gradle
 
 import org.gradle.api.DefaultTask
+import org.gradle.api.Project
 import org.gradle.api.file.FileCollection
 import org.gradle.api.file.FileTree
 import org.gradle.api.file.SourceDirectorySet
@@ -39,6 +40,8 @@ import org.gradle.api.tasks.Optional
 import org.gradle.api.tasks.OutputFile
 import org.gradle.api.tasks.TaskAction
 
+import javax.inject.Inject
+
 import java.nio.file.Files
 import java.nio.file.attribute.PosixFilePermissions
 
@@ -58,8 +61,12 @@ class NoticeTask extends DefaultTask {
     /** Directories to include notices from */
     private List<File> licensesDirs = new ArrayList<>()
 
-    NoticeTask() {
-        description = 'Create a notice file from dependencies'
+    private final Project project
+
+    @Inject
+    NoticeTask(Project project) {
+        this.project = project
+        this.description = 'Create a notice file from dependencies'
         // Default licenses directory is ${projectDir}/licenses (if it exists)
         File licensesDir = new File(project.projectDir, 'licenses')
         if (licensesDir.exists()) {
@@ -161,11 +168,12 @@ class NoticeTask extends DefaultTask {
     @Optional
     FileCollection getNoticeFiles() {
         FileTree tree
+        def p = project
         licensesDirs.each { dir ->
             if (tree == null) {
-                tree = project.fileTree(dir)
+                tree = p.fileTree(dir)
             } else {
-                tree += project.fileTree(dir)
+                tree += p.fileTree(dir)
             }
         }
 
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
index 13f5f8724c6f2..ad4bdb3258fcc 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -160,14 +160,14 @@ class PluginBuildPlugin implements Plugin<Project> {
                 archiveBaseName = archiveBaseName.get() +  "-client"
             }
             // always configure publishing for client jars
-            project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client")
+            project.publishing.publications.nebula(MavenPublication).artifactId = extension.name + "-client"
             final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class)
             project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask ->
                 generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesName}-client-${project.versions.opensearch}.pom"
             }
         } else {
             if (project.plugins.hasPlugin(MavenPublishPlugin)) {
-                project.publishing.publications.nebula(MavenPublication).artifactId(extension.name)
+                project.publishing.publications.nebula(MavenPublication).artifactId = extension.name
             }
         }
     }
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
index b8d0ed2b9c43c..e3f7469b527c8 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
@@ -32,6 +32,7 @@ import org.apache.rat.anttasks.Report
 import org.apache.rat.anttasks.SubstringLicenseMatcher
 import org.apache.rat.license.SimpleLicenseFamily
 import org.opensearch.gradle.AntTask
+import org.gradle.api.Project
 import org.gradle.api.file.FileCollection
 import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.InputFiles
@@ -41,6 +42,8 @@ import org.gradle.api.tasks.PathSensitive
 import org.gradle.api.tasks.PathSensitivity
 import org.gradle.api.tasks.SkipWhenEmpty
 
+import javax.inject.Inject
+
 import java.nio.file.Files
 
 /**
@@ -65,14 +68,18 @@ class LicenseHeadersTask extends AntTask {
     @Input
     List<String> excludes = []
 
+    private final Project project
+
     /**
      * Additional license families that may be found. The key is the license category name (5 characters),
      * followed by the family name and the value list of patterns to search for.
      */
     protected Map<String, String> additionalLicenses = new HashMap<>()
 
-    LicenseHeadersTask() {
-        description = "Checks sources for missing, incorrect, or unacceptable license headers"
+    @Inject
+    LicenseHeadersTask(Project project) {
+        this.project = project
+        this.description = "Checks sources for missing, incorrect, or unacceptable license headers"
     }
 
     /**
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy
index 316db8aa01764..42db92fd83515 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy
@@ -30,12 +30,16 @@
 package org.opensearch.gradle.test
 
 import org.apache.tools.ant.taskdefs.condition.Os
+import org.gradle.api.Project
 import org.gradle.api.GradleException
 import org.gradle.api.tasks.Exec
 import org.gradle.api.tasks.Internal
 import org.gradle.api.tasks.TaskProvider
 import org.opensearch.gradle.AntTask
 import org.opensearch.gradle.LoggedExec
+
+import javax.inject.Inject
+
 /**
  * A fixture for integration tests which runs in a separate process launched by Ant.
  */
@@ -90,9 +94,12 @@ class AntFixture extends AntTask implements Fixture {
     }
 
     private final TaskProvider stopTask
+    private final Project project
 
-    AntFixture() {
-        stopTask = createStopTask()
+    @Inject
+    AntFixture(Project project) {
+        this.project = project
+        this.stopTask = createStopTask()
         finalizedBy(stopTask)
     }
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java
index 96d7c69699c68..36aa1f99aa894 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java
@@ -32,6 +32,7 @@
 package org.opensearch.gradle;
 
 import org.gradle.api.DefaultTask;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.Input;
 import org.gradle.api.tasks.Internal;
 import org.gradle.api.tasks.TaskAction;
@@ -48,6 +49,12 @@ public class EmptyDirTask extends DefaultTask {
 
     private File dir;
     private int dirMode = 0755;
+    private final Project project;
+
+    @Inject
+    public EmptyDirTask(Project project) {
+        this.project = project;
+    }
 
     /**
      * Creates an empty directory with the configured permissions.
@@ -84,7 +91,7 @@ public void setDir(File dir) {
      * @param dir The path of the directory to create. Takes a String and coerces it to a file.
      */
     public void setDir(String dir) {
-        this.dir = getProject().file(dir);
+        this.dir = project.file(dir);
     }
 
     @Input
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java
index d00e790c94fcc..072b6fa788cbd 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java
@@ -33,6 +33,7 @@
 
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.DirectoryProperty;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
@@ -42,6 +43,8 @@
 import org.gradle.api.tasks.StopExecutionException;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -67,8 +70,9 @@ public class ExportOpenSearchBuildResourcesTask extends DefaultTask {
 
     private DirectoryProperty outputDir;
 
-    public ExportOpenSearchBuildResourcesTask() {
-        outputDir = getProject().getObjects().directoryProperty();
+    @Inject
+    public ExportOpenSearchBuildResourcesTask(Project project) {
+        outputDir = project.getObjects().directoryProperty();
     }
 
     @OutputDirectory
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java
index 4c62f4a6b4ee8..3557ef6ef3df7 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java
@@ -70,6 +70,7 @@ public class LoggedExec extends Exec implements FileSystemOperationsAware {
     private static final Logger LOGGER = Logging.getLogger(LoggedExec.class);
     private Consumer<Logger> outputLogger;
     private FileSystemOperations fileSystemOperations;
+    private final Project project;
 
     interface InjectedExecOps {
         @Inject
@@ -77,8 +78,9 @@ interface InjectedExecOps {
     }
 
     @Inject
-    public LoggedExec(FileSystemOperations fileSystemOperations) {
+    public LoggedExec(FileSystemOperations fileSystemOperations, Project project) {
         this.fileSystemOperations = fileSystemOperations;
+        this.project = project;
         if (getLogger().isInfoEnabled() == false) {
             setIgnoreExitValue(true);
             setSpoolOutput(false);
@@ -111,7 +113,7 @@ public void execute(Task task) {
     public void setSpoolOutput(boolean spoolOutput) {
         final OutputStream out;
         if (spoolOutput) {
-            File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName());
+            File spoolFile = new File(project.getBuildDir() + "/buffered-output/" + this.getName());
             out = new LazyFileOutputStream(spoolFile);
             outputLogger = logger -> {
                 try {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java
index 08f0e7488a43c..94a8592d9bc2f 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java
@@ -34,6 +34,7 @@
 import org.opensearch.gradle.LoggedExec;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.DirectoryProperty;
 import org.gradle.api.file.RegularFileProperty;
 import org.gradle.api.logging.Logger;
@@ -60,18 +61,22 @@ public class DockerBuildTask extends DefaultTask {
     private static final Logger LOGGER = Logging.getLogger(DockerBuildTask.class);
 
     private final WorkerExecutor workerExecutor;
-    private final RegularFileProperty markerFile = getProject().getObjects().fileProperty();
-    private final DirectoryProperty dockerContext = getProject().getObjects().directoryProperty();
+    private final RegularFileProperty markerFile;
+    private final DirectoryProperty dockerContext;
 
     private String[] tags;
     private boolean pull = true;
     private boolean noCache = true;
     private String[] baseImages;
+    private final Project project;
 
     @Inject
-    public DockerBuildTask(WorkerExecutor workerExecutor) {
+    public DockerBuildTask(WorkerExecutor workerExecutor, Project project) {
         this.workerExecutor = workerExecutor;
-        this.markerFile.set(getProject().getLayout().getBuildDirectory().file("markers/" + this.getName() + ".marker"));
+        this.project = project;
+        this.markerFile = project.getObjects().fileProperty();
+        this.dockerContext = project.getObjects().directoryProperty();
+        this.markerFile.set(project.getLayout().getBuildDirectory().file("markers/" + this.getName() + ".marker"));
     }
 
     @TaskAction
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
index 7248e0bc14431..337ac5d62c3fd 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
@@ -36,6 +36,7 @@
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
 import org.gradle.api.InvalidUserDataException;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
@@ -48,6 +49,8 @@
 import org.gradle.api.tasks.OutputDirectory;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
@@ -127,7 +130,7 @@ public class DependencyLicensesTask extends DefaultTask {
     /**
      * The directory to find the license and sha files in.
      */
-    private File licensesDir = new File(getProject().getProjectDir(), "licenses");
+    private File licensesDir;
 
     /**
      * A map of patterns to prefix, used to find the LICENSE and NOTICE file.
@@ -139,6 +142,14 @@ public class DependencyLicensesTask extends DefaultTask {
      */
     private Set<String> ignoreShas = new HashSet<>();
 
+    private final Project project;
+
+    @Inject
+    public DependencyLicensesTask(Project project) {
+        this.project = project;
+        this.licensesDir = new File(project.getProjectDir(), "licenses");
+    }
+
     /**
      * Add a mapping from a regex pattern for the jar name, to a prefix to find
      * the LICENSE and NOTICE file for that jar.
@@ -161,7 +172,7 @@ public void mapping(Map<String, String> props) {
     @InputFiles
     public Property<FileCollection> getDependencies() {
         if (dependenciesProvider == null) {
-            dependenciesProvider = getProject().getObjects().property(FileCollection.class);
+            dependenciesProvider = project.getObjects().property(FileCollection.class);
         }
         return dependenciesProvider;
     }
@@ -250,7 +261,7 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException {
     // by this output but when successful we can safely mark the task as up-to-date.
     @OutputDirectory
     public File getOutputMarker() {
-        return new File(getProject().getBuildDir(), "dependencyLicense");
+        return new File(project.getBuildDir(), "dependencyLicense");
     }
 
     private void failIfAnyMissing(String item, Boolean exists, String type) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
index 2c17666d8ee0c..0e5276bfdf033 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
@@ -35,6 +35,7 @@
 import org.opensearch.gradle.util.GradleUtils;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.tasks.IgnoreEmptyDirectories;
@@ -48,6 +49,8 @@
 import org.gradle.api.tasks.util.PatternFilterable;
 import org.gradle.api.tasks.util.PatternSet;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
@@ -71,10 +74,14 @@ public class FilePermissionsTask extends DefaultTask {
         // exclude sh files that might have the executable bit set
         .exclude("**/*.sh");
 
-    private File outputMarker = new File(getProject().getBuildDir(), "markers/filePermissions");
+    private final File outputMarker;
+    private final Project project;
 
-    public FilePermissionsTask() {
+    @Inject
+    public FilePermissionsTask(Project project) {
         setDescription("Checks java source files for correct file permissions");
+        this.project = project;
+        this.outputMarker = new File(project.getBuildDir(), "markers/filePermissions");
     }
 
     private static boolean isExecutableFile(File file) {
@@ -98,11 +105,11 @@ private static boolean isExecutableFile(File file) {
     @IgnoreEmptyDirectories
     @PathSensitive(PathSensitivity.RELATIVE)
     public FileCollection getFiles() {
-        return GradleUtils.getJavaSourceSets(getProject())
+        return GradleUtils.getJavaSourceSets(project)
             .stream()
             .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter))
             .reduce(FileTree::plus)
-            .orElse(getProject().files().getAsFileTree());
+            .orElse(project.files().getAsFileTree());
     }
 
     @TaskAction
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
index 6ef1e77f5138f..1790b32fb2f36 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
@@ -34,6 +34,7 @@
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
 import org.gradle.api.InvalidUserDataException;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.plugins.JavaPluginExtension;
@@ -48,6 +49,8 @@
 import org.gradle.api.tasks.util.PatternFilterable;
 import org.gradle.api.tasks.util.PatternSet;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -89,8 +92,10 @@ public class ForbiddenPatternsTask extends DefaultTask {
      * The rules: a map from the rule name, to a rule regex pattern.
      */
     private final Map<String, String> patterns = new HashMap<>();
+    private final Project project;
 
-    public ForbiddenPatternsTask() {
+    @Inject
+    public ForbiddenPatternsTask(Project project) {
         setDescription("Checks source files for invalid patterns like nocommits or tabs");
         getInputs().property("excludes", filesFilter.getExcludes());
         getInputs().property("rules", patterns);
@@ -99,6 +104,8 @@ public ForbiddenPatternsTask() {
         patterns.put("nocommit", "nocommit|NOCOMMIT");
         patterns.put("nocommit should be all lowercase or all uppercase", "((?i)nocommit)(?<!(nocommit|NOCOMMIT))");
         patterns.put("tab", "\t");
+
+        this.project = project;
     }
 
     @InputFiles
@@ -106,13 +113,13 @@ public ForbiddenPatternsTask() {
     @IgnoreEmptyDirectories
     @PathSensitive(PathSensitivity.RELATIVE)
     public FileCollection getFiles() {
-        return getProject().getExtensions()
+        return project.getExtensions()
             .getByType(JavaPluginExtension.class)
             .getSourceSets()
             .stream()
             .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter))
             .reduce(FileTree::plus)
-            .orElse(getProject().files().getAsFileTree());
+            .orElse(project.files().getAsFileTree());
     }
 
     @TaskAction
@@ -131,7 +138,7 @@ public void checkInvalidPatterns() throws IOException {
                 .boxed()
                 .collect(Collectors.toList());
 
-            String path = getProject().getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString();
+            String path = project.getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString();
             failures.addAll(
                 invalidLines.stream()
                     .map(l -> new AbstractMap.SimpleEntry<>(l + 1, lines.get(l)))
@@ -155,7 +162,7 @@ public void checkInvalidPatterns() throws IOException {
 
     @OutputFile
     public File getOutputMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + getName());
+        return new File(project.getBuildDir(), "markers/" + getName());
     }
 
     @Input
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java
index 7726133562e9f..ebe0b25a3a685 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java
@@ -33,11 +33,14 @@
 package org.opensearch.gradle.precommit;
 
 import org.opensearch.gradle.LoggedExec;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.tasks.CacheableTask;
 import org.gradle.api.tasks.CompileClasspath;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 
 /**
@@ -47,14 +50,18 @@
 public class JarHellTask extends PrecommitTask {
 
     private FileCollection classpath;
+    private final Project project;
 
-    public JarHellTask() {
+    @Inject
+    public JarHellTask(Project project) {
+        super(project);
         setDescription("Runs CheckJarHell on the configured classpath");
+        this.project = project;
     }
 
     @TaskAction
     public void runJarHellCheck() {
-        LoggedExec.javaexec(getProject(), spec -> {
+        LoggedExec.javaexec(project, spec -> {
             spec.environment("CLASSPATH", getClasspath().getAsPath());
             spec.getMainClass().set("org.opensearch.bootstrap.JarHell");
         });
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
index db215fb65ef95..70acdcc26c212 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
@@ -33,6 +33,7 @@
 package org.opensearch.gradle.precommit;
 
 import org.opensearch.gradle.LoggedExec;
+import org.gradle.api.Project;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.plugins.JavaPluginExtension;
 import org.gradle.api.tasks.CacheableTask;
@@ -45,6 +46,8 @@
 import org.gradle.api.tasks.SourceSet;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 
 /**
@@ -54,14 +57,18 @@
 public class LoggerUsageTask extends PrecommitTask {
 
     private FileCollection classpath;
+    private final Project project;
 
-    public LoggerUsageTask() {
+    @Inject
+    public LoggerUsageTask(Project project) {
+        super(project);
         setDescription("Runs LoggerUsageCheck on output directories of all source sets");
+        this.project = project;
     }
 
     @TaskAction
     public void runLoggerUsageTask() {
-        LoggedExec.javaexec(getProject(), spec -> {
+        LoggedExec.javaexec(project, spec -> {
             spec.getMainClass().set("org.opensearch.test.loggerusage.OpenSearchLoggerUsageChecker");
             spec.classpath(getClasspath());
             getClassDirectories().forEach(spec::args);
@@ -82,7 +89,7 @@ public void setClasspath(FileCollection classpath) {
     @SkipWhenEmpty
     @IgnoreEmptyDirectories
     public FileCollection getClassDirectories() {
-        return getProject().getExtensions()
+        return project.getExtensions()
             .getByType(JavaPluginExtension.class)
             .getSourceSets()
             .stream()
@@ -93,7 +100,7 @@ public FileCollection getClassDirectories() {
             )
             .map(sourceSet -> sourceSet.getOutput().getClassesDirs())
             .reduce(FileCollection::plus)
-            .orElse(getProject().files())
+            .orElse(project.files())
             .filter(File::exists);
     }
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java
index b76e0d6dd93cf..f7dea88cb2e30 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java
@@ -35,10 +35,13 @@
 import org.apache.maven.model.Model;
 import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.file.RegularFileProperty;
 import org.gradle.api.tasks.InputFile;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.FileReader;
 import java.util.Collection;
 import java.util.function.Consumer;
@@ -46,10 +49,16 @@
 
 public class PomValidationTask extends PrecommitTask {
 
-    private final RegularFileProperty pomFile = getProject().getObjects().fileProperty();
+    private final RegularFileProperty pomFile;
 
     private boolean foundError;
 
+    @Inject
+    public PomValidationTask(Project project) {
+        super(project);
+        this.pomFile = project.getObjects().fileProperty();
+    }
+
     @InputFile
     public RegularFileProperty getPomFile() {
         return pomFile;
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java
index 52646206e4792..670614aa48087 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java
@@ -32,19 +32,28 @@
 package org.opensearch.gradle.precommit;
 
 import org.gradle.api.DefaultTask;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.OutputFile;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.StandardOpenOption;
 
 public class PrecommitTask extends DefaultTask {
+    private final Project project;
+
+    @Inject
+    public PrecommitTask(Project project) {
+        this.project = project;
+    }
 
     @OutputFile
     public File getSuccessMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + this.getName());
+        return new File(project.getBuildDir(), "markers/" + this.getName());
     }
 
     @TaskAction
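
The buildSrc changes in this patch repeatedly apply one pattern: capture the Project through an @Inject constructor instead of calling getProject() from task methods later on. A hedged minimal sketch of that pattern (hypothetical task class, mirroring PrecommitTask above; motivation presumed to be compatibility with newer Gradle task realization rules):

    import org.gradle.api.DefaultTask;
    import org.gradle.api.Project;
    import org.gradle.api.tasks.OutputFile;

    import javax.inject.Inject;

    import java.io.File;

    public class InjectedProjectTaskSketch extends DefaultTask {
        private final Project project;

        @Inject
        public InjectedProjectTaskSketch(Project project) {
            this.project = project;   // resolved once, at construction time
        }

        @OutputFile
        public File getSuccessMarker() {
            return new File(project.getBuildDir(), "markers/" + getName());
        }
    }
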
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java
index d66b1f9d25cdd..9c1285914a03e 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java
@@ -38,6 +38,7 @@
 import org.opensearch.gradle.util.Util;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.NamedDomainObjectContainer;
+import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
@@ -85,12 +86,15 @@ public class TestingConventionsTasks extends DefaultTask {
     private Map<String, File> testClassNames;
 
     private final NamedDomainObjectContainer<TestingConventionRule> naming;
+    private final Project project;
 
-    public TestingConventionsTasks() {
+    @Inject
+    public TestingConventionsTasks(Project project) {
         setDescription("Tests various testing conventions");
         // Run only after everything is compiled
-        GradleUtils.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs()));
-        naming = getProject().container(TestingConventionRule.class);
+        GradleUtils.getJavaSourceSets(project).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs()));
+        this.naming = project.container(TestingConventionRule.class);
+        this.project = project;
     }
 
     @Inject
@@ -100,38 +104,34 @@ protected Factory<PatternSet> getPatternSetFactory() {
 
     @Input
     public Map<String, Set<File>> getClassFilesPerEnabledTask() {
-        return getProject().getTasks()
-            .withType(Test.class)
-            .stream()
-            .filter(Task::getEnabled)
-            .collect(Collectors.toMap(Task::getPath, task -> {
-                // See please https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath
-                final JvmTestSuite jvmTestSuite = JvmTestSuiteHelper.getDefaultTestSuite(getProject()).orElse(null);
-                if (jvmTestSuite != null) {
-                    final PatternFilterable patternSet = getPatternSetFactory().create()
-                        .include(task.getIncludes())
-                        .exclude(task.getExcludes());
-
-                    final Set<File> files = jvmTestSuite.getSources()
-                        .getOutput()
-                        .getClassesDirs()
-                        .getAsFileTree()
-                        .matching(patternSet)
-                        .getFiles();
-
-                    if (!files.isEmpty()) {
-                        return files;
-                    }
+        return project.getTasks().withType(Test.class).stream().filter(Task::getEnabled).collect(Collectors.toMap(Task::getPath, task -> {
+            // See please https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath
+            final JvmTestSuite jvmTestSuite = JvmTestSuiteHelper.getDefaultTestSuite(project).orElse(null);
+            if (jvmTestSuite != null) {
+                final PatternFilterable patternSet = getPatternSetFactory().create()
+                    .include(task.getIncludes())
+                    .exclude(task.getExcludes());
+
+                final Set<File> files = jvmTestSuite.getSources()
+                    .getOutput()
+                    .getClassesDirs()
+                    .getAsFileTree()
+                    .matching(patternSet)
+                    .getFiles();
+
+                if (!files.isEmpty()) {
+                    return files;
                 }
+            }
 
-                return task.getCandidateClassFiles().getFiles();
-            }));
+            return task.getCandidateClassFiles().getFiles();
+        }));
     }
 
     @Input
     public Map<String, File> getTestClassNames() {
         if (testClassNames == null) {
-            testClassNames = Util.getJavaTestSourceSet(getProject())
+            testClassNames = Util.getJavaTestSourceSet(project)
                 .get()
                 .getOutput()
                 .getClassesDirs()
@@ -151,7 +151,7 @@ public NamedDomainObjectContainer<TestingConventionRule> getNaming() {
 
     @OutputFile
     public File getSuccessMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + getName());
+        return new File(project.getBuildDir(), "markers/" + getName());
     }
 
     public void naming(Closure<?> action) {
@@ -160,7 +160,7 @@ public void naming(Closure<?> action) {
 
     @Input
     public Set<String> getMainClassNamedLikeTests() {
-        SourceSetContainer javaSourceSets = GradleUtils.getJavaSourceSets(getProject());
+        SourceSetContainer javaSourceSets = GradleUtils.getJavaSourceSets(project);
         if (javaSourceSets.findByName(SourceSet.MAIN_SOURCE_SET_NAME) == null) {
             // some test projects don't have a main source set
             return Collections.emptySet();
@@ -195,7 +195,7 @@ public void doCheck() throws IOException {
                 .stream()
                 .collect(Collectors.toMap(Map.Entry::getValue, entry -> loadClassWithoutInitializing(entry.getKey(), isolatedClassLoader)));
 
-            final FileTree allTestClassFiles = getProject().files(
+            final FileTree allTestClassFiles = project.files(
                 classes.values()
                     .stream()
                     .filter(isStaticClass.negate())
@@ -207,7 +207,7 @@ public void doCheck() throws IOException {
 
             final Map<String, Set<File>> classFilesPerTask = getClassFilesPerEnabledTask();
 
-            final Set<File> testSourceSetFiles = Util.getJavaTestSourceSet(getProject()).get().getRuntimeClasspath().getFiles();
+            final Set<File> testSourceSetFiles = Util.getJavaTestSourceSet(project).get().getRuntimeClasspath().getFiles();
             final Map<String, Set<Class<?>>> testClassesPerTask = classFilesPerTask.entrySet()
                 .stream()
                 .filter(entry -> testSourceSetFiles.containsAll(entry.getValue()))
@@ -398,7 +398,7 @@ private boolean isAnnotated(Method method, Class<?> annotation) {
 
     @Classpath
     public FileCollection getTestsClassPath() {
-        return Util.getJavaTestSourceSet(getProject()).get().getRuntimeClasspath();
+        return Util.getJavaTestSourceSet(project).get().getRuntimeClasspath();
     }
 
     private Map<String, File> walkPathAndLoadClasses(File testRoot) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
index 6842f0e541abe..2ed801b7fb9c6 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -40,6 +40,7 @@
 import org.opensearch.gradle.util.GradleUtils;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.JavaVersion;
+import org.gradle.api.Project;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.artifacts.Dependency;
 import org.gradle.api.file.FileCollection;
@@ -107,7 +108,15 @@ public class ThirdPartyAuditTask extends DefaultTask {
 
     private FileCollection jdkJarHellClasspath;
 
-    private final Property<JavaVersion> targetCompatibility = getProject().getObjects().property(JavaVersion.class);
+    private final Project project;
+
+    private final Property<JavaVersion> targetCompatibility;
+
+    @Inject
+    public ThirdPartyAuditTask(Project project) {
+        this.project = project;
+        this.targetCompatibility = project.getObjects().property(JavaVersion.class);
+    }
 
     public boolean jarHellEnabled = true;
 
@@ -124,7 +133,7 @@ public Property<JavaVersion> getTargetCompatibility() {
     @InputFiles
     @PathSensitive(PathSensitivity.NAME_ONLY)
     public Configuration getForbiddenAPIsConfiguration() {
-        return getProject().getConfigurations().getByName("forbiddenApisCliJar");
+        return project.getConfigurations().getByName("forbiddenApisCliJar");
     }
 
     @InputFile
@@ -149,12 +158,12 @@ public void setJavaHome(String javaHome) {
 
     @Internal
     public File getJarExpandDir() {
-        return new File(new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), getName());
+        return new File(new File(project.getBuildDir(), "precommit/thirdPartyAudit"), getName());
     }
 
     @OutputFile
     public File getSuccessMarker() {
-        return new File(getProject().getBuildDir(), "markers/" + getName());
+        return new File(project.getBuildDir(), "markers/" + getName());
     }
 
     // We use compile classpath normalization here because class implementation changes are irrelevant for the purposes of jdk jar hell.
@@ -213,10 +222,10 @@ public Set<File> getJarsToScan() {
         // err on the side of scanning these to make sure we don't miss anything
         Spec<Dependency> reallyThirdParty = dep -> dep.getGroup() != null && dep.getGroup().startsWith("org.opensearch") == false;
 
-        Set<File> jars = GradleUtils.getFiles(getProject(), getRuntimeConfiguration(), reallyThirdParty).getFiles();
+        Set<File> jars = GradleUtils.getFiles(project, getRuntimeConfiguration(), reallyThirdParty).getFiles();
         Set<File> compileOnlyConfiguration = GradleUtils.getFiles(
-            getProject(),
-            getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME),
+            project,
+            project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME),
             reallyThirdParty
         ).getFiles();
         // don't scan provided dependencies that we already scanned, e.x. don't scan cores dependencies for every plugin
@@ -310,14 +319,14 @@ private Set<File> extractJars(Set<File> jars) {
         Set<File> extractedJars = new TreeSet<>();
         File jarExpandDir = getJarExpandDir();
         // We need to clean up to make sure old dependencies don't linger
-        getProject().delete(jarExpandDir);
+        project.delete(jarExpandDir);
 
         jars.forEach(jar -> {
             String jarPrefix = jar.getName().replace(".jar", "");
             File jarSubDir = new File(jarExpandDir, jarPrefix);
             extractedJars.add(jarSubDir);
-            FileTree jarFiles = getProject().zipTree(jar);
-            getProject().copy(spec -> {
+            FileTree jarFiles = project.zipTree(jar);
+            project.copy(spec -> {
                 spec.from(jarFiles);
                 spec.into(jarSubDir);
                 // exclude classes from multi release jars
@@ -336,8 +345,8 @@ private Set<File> extractJars(Set<File> jars) {
             IntStream.rangeClosed(
                 Integer.parseInt(JavaVersion.VERSION_1_9.getMajorVersion()),
                 Integer.parseInt(targetCompatibility.get().getMajorVersion())
-            ).forEach(majorVersion -> getProject().copy(spec -> {
-                spec.from(getProject().zipTree(jar));
+            ).forEach(majorVersion -> project.copy(spec -> {
+                spec.from(project.zipTree(jar));
                 spec.into(jarSubDir);
                 String metaInfPrefix = "META-INF/versions/" + majorVersion;
                 spec.include(metaInfPrefix + "/**");
@@ -376,7 +385,7 @@ private String formatClassList(Set<String> classList) {
 
     private String runForbiddenAPIsCli() throws IOException {
         ByteArrayOutputStream errorOut = new ByteArrayOutputStream();
-        InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class);
+        InjectedExecOps execOps = project.getObjects().newInstance(InjectedExecOps.class);
         ExecResult result = execOps.getExecOps().javaexec(spec -> {
             if (javaHome != null) {
                 spec.setExecutable(javaHome + "/bin/java");
@@ -384,7 +393,7 @@ private String runForbiddenAPIsCli() throws IOException {
             spec.classpath(
                 getForbiddenAPIsConfiguration(),
                 getRuntimeConfiguration(),
-                getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
+                project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
             );
             spec.jvmArgs("-Xmx1g");
             spec.jvmArgs(LoggedExec.shortLivedArgs());
@@ -416,12 +425,12 @@ private String runForbiddenAPIsCli() throws IOException {
      */
     private Set<String> runJdkJarHellCheck(Set<File> jars) throws IOException {
         ByteArrayOutputStream standardOut = new ByteArrayOutputStream();
-        InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class);
+        InjectedExecOps execOps = project.getObjects().newInstance(InjectedExecOps.class);
         ExecResult execResult = execOps.getExecOps().javaexec(spec -> {
             spec.classpath(
                 jdkJarHellClasspath,
                 getRuntimeConfiguration(),
-                getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
+                project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
             );
             spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS);
             spec.args(jars);
@@ -442,9 +451,9 @@ private Set<String> runJdkJarHellCheck(Set<File> jars) throws IOException {
     }
 
     private Configuration getRuntimeConfiguration() {
-        Configuration runtime = getProject().getConfigurations().findByName("runtimeClasspath");
+        Configuration runtime = project.getConfigurations().findByName("runtimeClasspath");
         if (runtime == null) {
-            return getProject().getConfigurations().getByName("testCompileClasspath");
+            return project.getConfigurations().getByName("testCompileClasspath");
         }
         return runtime;
     }
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java
index aff9198e15772..4bdc75457ba75 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java
@@ -192,6 +192,10 @@ public Destination getDestination() {
                         public String getMessage() {
                             return message;
                         }
+
+                        public long getLogTime() {
+                            return System.currentTimeMillis();
+                        }
                     });
                 }
             }
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java
index fa417da1a1007..caac3ede98588 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java
@@ -34,9 +34,12 @@
 
 import org.opensearch.gradle.vagrant.VagrantMachine;
 import org.opensearch.gradle.vagrant.VagrantShellTask;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.Input;
 import org.gradle.api.tasks.options.Option;
 
+import javax.inject.Inject;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -49,6 +52,13 @@ public class GradleDistroTestTask extends VagrantShellTask {
     private String taskName;
     private String testClass;
     private List<String> extraArgs = new ArrayList<>();
+    private final Project project;
+
+    @Inject
+    public GradleDistroTestTask(Project project) {
+        super(project);
+        this.project = project;
+    }
 
     public void setTaskName(String taskName) {
         this.taskName = taskName;
@@ -84,17 +94,15 @@ protected List<String> getLinuxScript() {
     }
 
     private List<String> getScript(boolean isWindows) {
-        String cacheDir = getProject().getBuildDir() + "/gradle-cache";
+        String cacheDir = project.getBuildDir() + "/gradle-cache";
         StringBuilder line = new StringBuilder();
         line.append(isWindows ? "& .\\gradlew " : "./gradlew ");
         line.append(taskName);
         line.append(" --project-cache-dir ");
-        line.append(
-            isWindows ? VagrantMachine.convertWindowsPath(getProject(), cacheDir) : VagrantMachine.convertLinuxPath(getProject(), cacheDir)
-        );
+        line.append(isWindows ? VagrantMachine.convertWindowsPath(project, cacheDir) : VagrantMachine.convertLinuxPath(project, cacheDir));
         line.append(" -S");
         line.append(" --parallel");
-        line.append(" -D'org.gradle.logging.level'=" + getProject().getGradle().getStartParameter().getLogLevel());
+        line.append(" -D'org.gradle.logging.level'=" + project.getGradle().getStartParameter().getLogLevel());
         if (testClass != null) {
             line.append(" --tests=");
             line.append(testClass);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java
index aec31d02b9bee..474c04eabbcaf 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java
@@ -35,9 +35,12 @@
 import groovy.lang.Closure;
 
 import org.opensearch.gradle.testclusters.StandaloneRestIntegTestTask;
+import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.tasks.CacheableTask;
 
+import javax.inject.Inject;
+
 /**
  * Sub typed version of {@link StandaloneRestIntegTestTask}  that is used to differentiate between plain standalone
  * integ test tasks based on {@link StandaloneRestIntegTestTask} and
@@ -45,11 +48,19 @@
  */
 @CacheableTask
 public abstract class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings {
+    private final Project project;
+
+    @Inject
+    public RestIntegTestTask(Project project) {
+        super(project);
+        this.project = project;
+    }
+
     @SuppressWarnings("rawtypes")
     @Override
     public Task configure(Closure closure) {
         final Task t = super.configure(closure);
-        applyConventionMapping(getProject(), getConventionMapping());
+        applyConventionMapping(project, getConventionMapping());
         return t;
     }
 }
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java
index ce5210482c055..24c4a46abfe29 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java
@@ -55,7 +55,7 @@ public void apply(Project project) {
                 .getExtensions()
                 .getByName(TestClustersPlugin.EXTENSION_NAME);
             OpenSearchCluster cluster = testClusters.maybeCreate(restIntegTestTask.getName());
-            restIntegTestTask.useCluster(cluster);
+            restIntegTestTask.useCluster(project, cluster);
             restIntegTestTask.include("**/*IT.class");
             restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString());
             if (System.getProperty(TESTS_REST_CLUSTER) == null) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java
index f7511a2ac7f1c..abd40d2e0665a 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java
@@ -10,17 +10,27 @@
 
 import groovy.lang.Closure;
 
+import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.tasks.CacheableTask;
 import org.gradle.api.tasks.testing.Test;
 
+import javax.inject.Inject;
+
 @CacheableTask
 public abstract class TestTask extends Test implements TestSuiteConventionMappings {
+    private final Project project;
+
+    @Inject
+    public TestTask(Project project) {
+        this.project = project;
+    }
+
     @SuppressWarnings("rawtypes")
     @Override
     public Task configure(Closure closure) {
         final Task t = super.configure(closure);
-        applyConventionMapping(getProject(), getConventionMapping());
+        applyConventionMapping(project, getConventionMapping());
         return t;
     }
 }
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java
index 485561a305291..4d6be4beaccf8 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java
@@ -74,16 +74,20 @@
  */
 public class CopyRestApiTask extends DefaultTask {
     private static final String REST_API_PREFIX = "rest-api-spec/api";
-    final ListProperty<String> includeCore = getProject().getObjects().listProperty(String.class);
+    final ListProperty<String> includeCore;
     String sourceSetName;
     boolean skipHasRestTestCheck;
     Configuration coreConfig;
     Configuration additionalConfig;
+    private final Project project;
 
     private final PatternFilterable corePatternSet;
 
-    public CopyRestApiTask() {
-        corePatternSet = getPatternSetFactory().create();
+    @Inject
+    public CopyRestApiTask(Project project) {
+        this.project = project;
+        this.corePatternSet = getPatternSetFactory().create();
+        this.includeCore = project.getObjects().listProperty(String.class);
     }
 
     @Inject
@@ -133,8 +137,8 @@ public FileTree getInputDir() {
         }
 
         ConfigurableFileCollection fileCollection = additionalConfig == null
-            ? getProject().files(coreFileTree)
-            : getProject().files(coreFileTree, additionalConfig.getAsFileTree());
+            ? project.files(coreFileTree)
+            : project.files(coreFileTree, additionalConfig.getAsFileTree());
 
         // if project has rest tests or the includes are explicitly configured execute the task, else NO-SOURCE due to the null input
         return projectHasYamlRestTests || includeCore.get().isEmpty() == false ? fileCollection.getAsFileTree() : null;
@@ -210,7 +214,7 @@ private boolean projectHasYamlRestTests() {
                     .anyMatch(p -> p.getFileName().toString().endsWith("yml"));
             }
         } catch (IOException e) {
-            throw new IllegalStateException(String.format("Error determining if this project [%s] has rest tests.", getProject()), e);
+            throw new IllegalStateException(String.format("Error determining if this project [%s] has rest tests.", project), e);
         }
         return false;
     }
@@ -240,7 +244,6 @@ private File getTestOutputResourceDir() {
     }
 
     private Optional<SourceSet> getSourceSet() {
-        Project project = getProject();
         return project.getExtensions().findByType(JavaPluginExtension.class) == null
             ? Optional.empty()
             : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName()));
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java
index 0d5af7ca06b50..6f7c99889e3a2 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java
@@ -71,16 +71,20 @@
  */
 public class CopyRestTestsTask extends DefaultTask {
     private static final String REST_TEST_PREFIX = "rest-api-spec/test";
-    final ListProperty<String> includeCore = getProject().getObjects().listProperty(String.class);
+    final ListProperty<String> includeCore;
 
     String sourceSetName;
     Configuration coreConfig;
     Configuration additionalConfig;
+    private final Project project;
 
     private final PatternFilterable corePatternSet;
 
-    public CopyRestTestsTask() {
-        corePatternSet = getPatternSetFactory().create();
+    @Inject
+    public CopyRestTestsTask(Project project) {
+        this.project = project;
+        this.corePatternSet = getPatternSetFactory().create();
+        this.includeCore = project.getObjects().listProperty(String.class);
     }
 
     @Inject
@@ -123,8 +127,8 @@ public FileTree getInputDir() {
             }
         }
         ConfigurableFileCollection fileCollection = additionalConfig == null
-            ? getProject().files(coreFileTree)
-            : getProject().files(coreFileTree, additionalConfig.getAsFileTree());
+            ? project.files(coreFileTree)
+            : project.files(coreFileTree, additionalConfig.getAsFileTree());
 
         // copy tests only if explicitly requested
         return includeCore.get().isEmpty() == false || additionalConfig != null ? fileCollection.getAsFileTree() : null;
@@ -178,7 +182,6 @@ void copy() {
     }
 
     private Optional<SourceSet> getSourceSet() {
-        Project project = getProject();
         return project.getExtensions().findByType(JavaPluginExtension.class) == null
             ? Optional.empty()
             : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName()));
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java
index ddcbf77b0d5e6..5b883f8068825 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java
@@ -36,6 +36,7 @@
 import org.opensearch.gradle.FileSystemOperationsAware;
 import org.opensearch.gradle.test.Fixture;
 import org.opensearch.gradle.util.GradleUtils;
+import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.provider.Provider;
 import org.gradle.api.services.internal.BuildServiceProvider;
@@ -48,6 +49,8 @@
 import org.gradle.internal.resources.ResourceLock;
 import org.gradle.internal.resources.SharedResource;
 
+import javax.inject.Inject;
+
 import java.lang.invoke.MethodHandles;
 import java.lang.invoke.MethodType;
 import java.util.ArrayList;
@@ -67,7 +70,8 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl
     private Collection<OpenSearchCluster> clusters = new HashSet<>();
     private Closure<Void> beforeStart;
 
-    public StandaloneRestIntegTestTask() {
+    @Inject
+    public StandaloneRestIntegTestTask(Project project) {
         this.getOutputs()
             .doNotCacheIf(
                 "Caching disabled for this task since it uses a cluster shared by other tasks",
@@ -77,7 +81,7 @@ public StandaloneRestIntegTestTask() {
                  * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between
                  * multiple tasks.
                  */
-                t -> getProject().getTasks()
+                t -> project.getTasks()
                     .withType(StandaloneRestIntegTestTask.class)
                     .stream()
                     .filter(task -> task != this)
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java
index e5c413df00d0d..f2eeec08fc71f 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java
@@ -31,6 +31,7 @@
 
 package org.opensearch.gradle.testclusters;
 
+import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.tasks.Nested;
@@ -43,8 +44,13 @@ public interface TestClustersAware extends Task {
     @Nested
     Collection<OpenSearchCluster> getClusters();
 
+    @Deprecated(forRemoval = true)
     default void useCluster(OpenSearchCluster cluster) {
-        if (cluster.getPath().equals(getProject().getPath()) == false) {
+        useCluster(getProject(), cluster);
+    }
+
+    default void useCluster(Project project, OpenSearchCluster cluster) {
+        if (cluster.getPath().equals(project.getPath()) == false) {
             throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster);
         }
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
index 79b5f837c75ce..c3b870e4ce5ad 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
@@ -249,7 +249,7 @@ private void configureServiceInfoForTask(
         task.doFirst(new Action<Task>() {
             @Override
             public void execute(Task theTask) {
-                TestFixtureExtension extension = theTask.getProject().getExtensions().getByType(TestFixtureExtension.class);
+                TestFixtureExtension extension = fixtureProject.getExtensions().getByType(TestFixtureExtension.class);
 
                 fixtureProject.getExtensions()
                     .getByType(ComposeExtension.class)
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java
index ca1b95183505f..665f690b8b146 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java
@@ -33,9 +33,12 @@
 package org.opensearch.gradle.vagrant;
 
 import org.gradle.api.DefaultTask;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.Input;
 import org.gradle.api.tasks.TaskAction;
 
+import javax.inject.Inject;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -55,13 +58,16 @@ public abstract class VagrantShellTask extends DefaultTask {
     private final VagrantExtension extension;
     private final VagrantMachine service;
     private UnaryOperator<String> progressHandler = UnaryOperator.identity();
+    private final Project project;
 
-    public VagrantShellTask() {
-        extension = getProject().getExtensions().findByType(VagrantExtension.class);
-        if (extension == null) {
+    @Inject
+    public VagrantShellTask(Project project) {
+        this.project = project;
+        this.extension = project.getExtensions().findByType(VagrantExtension.class);
+        if (this.extension == null) {
             throw new IllegalStateException("opensearch.vagrant-base must be applied to create " + getClass().getName());
         }
-        service = getProject().getExtensions().getByType(VagrantMachine.class);
+        this.service = project.getExtensions().getByType(VagrantMachine.class);
     }
 
     @Input
@@ -81,14 +87,14 @@ public void setProgressHandler(UnaryOperator<String> progressHandler) {
 
     @TaskAction
     public void runScript() {
-        String rootDir = getProject().getRootDir().toString();
+        String rootDir = project.getRootDir().toString();
         if (extension.isWindowsVM()) {
             service.execute(spec -> {
                 spec.setCommand("winrm");
 
                 List<String> script = new ArrayList<>();
                 script.add("try {");
-                script.add("cd " + convertWindowsPath(getProject(), rootDir));
+                script.add("cd " + convertWindowsPath(project, rootDir));
                 extension.getVmEnv().forEach((k, v) -> script.add("$Env:" + k + " = \"" + v + "\""));
                 script.addAll(getWindowsScript().stream().map(s -> "    " + s).collect(Collectors.toList()));
                 script.addAll(
@@ -111,7 +117,7 @@ public void runScript() {
                 List<String> script = new ArrayList<>();
                 script.add("sudo bash -c '"); // start inline bash script
                 script.add("pwd");
-                script.add("cd " + convertLinuxPath(getProject(), rootDir));
+                script.add("cd " + convertLinuxPath(project, rootDir));
                 extension.getVmEnv().forEach((k, v) -> script.add("export " + k + "=" + v));
                 script.addAll(getLinuxScript());
                 script.add("'"); // end inline bash script
diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle
index 8e4f40c096851..feec78547edb6 100644
--- a/client/client-benchmark-noop-api-plugin/build.gradle
+++ b/client/client-benchmark-noop-api-plugin/build.gradle
@@ -33,9 +33,9 @@ group = 'org.opensearch.plugin'
 apply plugin: 'opensearch.opensearchplugin'
 
 opensearchplugin {
-  name 'client-benchmark-noop-api'
-  description 'Stubbed out OpenSearch actions that can be used for client-side benchmarking'
-  classname 'org.opensearch.plugin.noop.NoopPlugin'
+  name = 'client-benchmark-noop-api'
+  description = 'Stubbed out OpenSearch actions that can be used for client-side benchmarking'
+  classname = 'org.opensearch.plugin.noop.NoopPlugin'
 }
 
 // Not published so no need to assemble
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 36efe2e0d45e8..b04b04062134f 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -150,7 +150,7 @@ void copyModule(TaskProvider<Sync> copyTask, Project module) {
 
     dependsOn moduleConfig
     from({ zipTree(moduleConfig.singleFile) }) {
-      includeEmptyDirs false
+      includeEmptyDirs = false
 
       // these are handled separately in the log4j config tasks below
       exclude '*/config/log4j2.properties'
diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 64471139e025b..d2b99ab051327 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -178,7 +178,7 @@ tasks.named("preProcessFixture").configure {
   }
   doLast {
     // tests expect to have an empty repo
-    project.delete(
+    delete(
       "${buildDir}/repo"
     )
     createAndSetWritable(
@@ -273,8 +273,8 @@ subprojects { Project subProject ->
     }
 
     artifacts.add('default', file(tarFile)) {
-      type 'tar'
-      name artifactName
+      type = 'tar'
+      name = artifactName
       builtBy exportTaskName
     }
 
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index e1fa4de5a0caa..ada19dfa38e78 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -111,21 +111,21 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
       OS.current().equals(OS.WINDOWS) == false
     }
     dependsOn "process'${jdk ? '' : 'NoJdk'}${type.capitalize()}Files"
-    packageName "opensearch"
+    packageName = "opensearch"
     if (type == 'deb') {
       if (architecture == 'x64') {
-        arch('amd64')
+        arch = 'amd64'
       } else {
         assert architecture == 'arm64' : architecture
-        arch('arm64')
+        arch = 'arm64'
       }
     } else {
       assert type == 'rpm' : type
       if (architecture == 'x64') {
-        arch('x86_64')
+        arch = 'x86_64'
       } else {
         assert architecture == 'arm64' : architecture
-        arch('aarch64')
+        arch = 'aarch64'
       }
     }
     // Follow opensearch's file naming convention
@@ -224,8 +224,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
       }
       into('/etc')
       permissionGroup 'opensearch'
-      includeEmptyDirs true
-      createDirectoryEntry true
+      includeEmptyDirs = true
+      createDirectoryEntry = true
       include("opensearch") // empty dir, just to add directory entry
       include("opensearch/jvm.options.d") // empty dir, just to add directory entry
     }
@@ -238,8 +238,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
         unix 0660
       }
       permissionGroup 'opensearch'
-      includeEmptyDirs true
-      createDirectoryEntry true
+      includeEmptyDirs = true
+      createDirectoryEntry = true
       fileType CONFIG | NOREPLACE
     }
     String envFile = expansionsForDistribution(type, jdk)['path.env']
@@ -298,8 +298,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
       into(file.parent) {
         from "${packagingFiles}/${file.parent}"
         include file.name
-        includeEmptyDirs true
-        createDirectoryEntry true
+        includeEmptyDirs = true
+        createDirectoryEntry = true
         user u
         permissionGroup g
         dirPermissions {
@@ -320,13 +320,13 @@ apply plugin: 'com.netflix.nebula.ospackage-base'
 
 // this is package independent configuration
 ospackage {
-  maintainer 'OpenSearch Team <opensearch@amazon.com>'
-  summary 'Distributed RESTful search engine built for the cloud'
-  packageDescription '''
+  maintainer = 'OpenSearch Team <opensearch@amazon.com>'
+  summary = 'Distributed RESTful search engine built for the cloud'
+  packageDescription = '''
     Reference documentation can be found at
     https://github.com/opensearch-project/OpenSearch
   '''.stripIndent().trim()
-  url 'https://github.com/opensearch-project/OpenSearch'
+  url = 'https://github.com/opensearch-project/OpenSearch'
 
   // signing setup
   if (project.hasProperty('signing.password') && BuildParams.isSnapshotBuild() == false) {
@@ -340,10 +340,10 @@ ospackage {
   // version found on oldest supported distro, centos-6
   requires('coreutils', '8.4', GREATER | EQUAL)
 
-  fileMode 0644
-  dirMode 0755
-  user 'root'
-  permissionGroup 'root'
+  fileMode = 0644
+  dirMode = 0755
+  user = 'root'
+  permissionGroup = 'root'
 
   into '/usr/share/opensearch'
 }
@@ -357,7 +357,7 @@ Closure commonDebConfig(boolean jdk, String architecture) {
     customFields['License'] = 'ASL-2.0'
 
     archiveVersion = project.version.replace('-', '~')
-    packageGroup 'web'
+    packageGroup = 'web'
 
     // versions found on oldest supported distro, centos-6
     requires('bash', '4.1', GREATER | EQUAL)
@@ -394,24 +394,24 @@ Closure commonRpmConfig(boolean jdk, String architecture) {
   return {
     configure(commonPackageConfig('rpm', jdk, architecture))
 
-    license 'ASL-2.0'
+    license = 'ASL-2.0'
 
-    packageGroup 'Application/Internet'
+    packageGroup = 'Application/Internet'
     requires '/bin/bash'
 
     obsoletes packageName, '7.0.0', Flags.LESS
 
     prefix '/usr'
-    packager 'OpenSearch'
+    packager = 'OpenSearch'
     archiveVersion = project.version.replace('-', '_')
     release = '1'
-    os 'LINUX'
-    distribution 'OpenSearch'
-    vendor 'OpenSearch'
+    os = 'LINUX'
+    distribution = 'OpenSearch'
+    vendor = 'OpenSearch'
     // TODO ospackage doesn't support icon but we used to have one
 
     // without this the rpm will have parent dirs of any files we copy in, eg /etc/opensearch
-    addParentDirs false
+    addParentDirs = false
   }
 }
 
diff --git a/doc-tools/build.gradle b/doc-tools/build.gradle
index e6ace21420dda..9639c7d7048d6 100644
--- a/doc-tools/build.gradle
+++ b/doc-tools/build.gradle
@@ -3,8 +3,8 @@ plugins {
 }
 
 base {
-  group 'org.opensearch'
-  version '1.0.0-SNAPSHOT'
+  group = 'org.opensearch'
+  version = '1.0.0-SNAPSHOT'
 }
 
 repositories {
diff --git a/doc-tools/missing-doclet/build.gradle b/doc-tools/missing-doclet/build.gradle
index 114ccc948951a..c3c951fbcaf47 100644
--- a/doc-tools/missing-doclet/build.gradle
+++ b/doc-tools/missing-doclet/build.gradle
@@ -2,8 +2,8 @@ plugins {
   id 'java-library'
 }
 
-group 'org.opensearch'
-version '1.0.0-SNAPSHOT'
+group = 'org.opensearch'
+version = '1.0.0-SNAPSHOT'
 
 tasks.withType(JavaCompile) {
   options.compilerArgs += ["--release", targetCompatibility.toString()]
diff --git a/gradle/ide.gradle b/gradle/ide.gradle
index e266d9add172d..c16205468d63d 100644
--- a/gradle/ide.gradle
+++ b/gradle/ide.gradle
@@ -16,7 +16,7 @@ import org.jetbrains.gradle.ext.JUnit
 buildscript {
   repositories {
     maven {
-      url "https://plugins.gradle.org/m2/"
+      url = "https://plugins.gradle.org/m2/"
     }
   }
   dependencies {
diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle
index 5a98a60e806ea..179c905c880b4 100644
--- a/gradle/missing-javadoc.gradle
+++ b/gradle/missing-javadoc.gradle
@@ -64,8 +64,8 @@ allprojects {
 
 
     tasks.register('missingJavadoc', MissingJavadocTask) {
-      description "This task validates and generates Javadoc API documentation for the main source code."
-      group "documentation"
+      description = "This task validates and generates Javadoc API documentation for the main source code."
+      group = "documentation"
 
       taskResources = resources
       dependsOn sourceSets.main.compileClasspath
@@ -227,11 +227,18 @@ class MissingJavadocTask extends DefaultTask {
   @PathSensitive(PathSensitivity.RELATIVE)
   def taskResources
 
+  Project project
+
   // See please https://docs.gradle.org/8.11/userguide/service_injection.html#execoperations
   interface InjectedExecOps {
     @Inject ExecOperations getExecOps()
   }
 
+  @Inject
+  MissingJavadocTask(Project project) {
+    this.project = project
+  }
+
   /** Utility method to recursively collect all tasks with same name like this one that we depend on */
   private Set findRenderTasksInDependencies() {
     Set found = []
@@ -350,7 +357,7 @@ class MissingJavadocTask extends DefaultTask {
         // force locale to be "en_US" (fix for: https://bugs.openjdk.java.net/browse/JDK-8222793)
         args += [ "-J-Duser.language=en", "-J-Duser.country=US" ]
 
-        ignoreExitValue true
+        ignoreExitValue = true
       }
     }
 
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index ec480eaeb61ef..8b3d2296213c2 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -11,7 +11,7 @@
 
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a
+distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5
diff --git a/libs/common/build.gradle b/libs/common/build.gradle
index 60bf488833393..2bf2dbb803d9f 100644
--- a/libs/common/build.gradle
+++ b/libs/common/build.gradle
@@ -92,7 +92,7 @@ if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) {
   }
 
   tasks.register('roundableSimdTest', Test) {
-    group 'verification'
+    group = 'verification'
     include '**/RoundableTests.class'
     systemProperty 'opensearch.experimental.feature.simd.rounding.enabled', 'forced'
   }
diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle
index 705fa17456a79..fc3e009e0660e 100644
--- a/modules/aggs-matrix-stats/build.gradle
+++ b/modules/aggs-matrix-stats/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.'
-  classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin'
+  description = 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.'
+  classname = 'org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle
index 58ecf79cda0d7..b0e1aaa2de814 100644
--- a/modules/analysis-common/build.gradle
+++ b/modules/analysis-common/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Adds "built in" analyzers to OpenSearch.'
-  classname 'org.opensearch.analysis.common.CommonAnalysisModulePlugin'
+  description = 'Adds "built in" analyzers to OpenSearch.'
+  classname = 'org.opensearch.analysis.common.CommonAnalysisModulePlugin'
   extendedPlugins = ['lang-painless']
 }
 
diff --git a/modules/build.gradle b/modules/build.gradle
index 126bf0c8870ac..0c69a43af0509 100644
--- a/modules/build.gradle
+++ b/modules/build.gradle
@@ -35,7 +35,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
   
   opensearchplugin {
     // for local OpenSearch plugins, the name of the plugin is the same as the directory
-    name project.name
+    name = project.name
   }
 
   if (project.file('src/main/packaging').exists()) {
diff --git a/modules/cache-common/build.gradle b/modules/cache-common/build.gradle
index 98cdec83b9ad1..996c47b26b4d9 100644
--- a/modules/cache-common/build.gradle
+++ b/modules/cache-common/build.gradle
@@ -9,8 +9,8 @@
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Module for caches which are optional and do not require additional security permission'
-  classname 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin'
+  description = 'Module for caches which are optional and do not require additional security permission'
+  classname = 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin'
 }
 
 test {
diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle
index 7ab6f80b65ca2..dc135ce7a4e35 100644
--- a/modules/geo/build.gradle
+++ b/modules/geo/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Plugin for geospatial features in OpenSearch. Registering the geo_shape and aggregations on GeoShape and GeoPoint'
-  classname 'org.opensearch.geo.GeoModulePlugin'
+  description = 'Plugin for geospatial features in OpenSearch. Registering the geo_shape and aggregations on GeoShape and GeoPoint'
+  classname = 'org.opensearch.geo.GeoModulePlugin'
 }
 
 restResources {
diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle
index 7b567eb9110c5..721aef35f5ff3 100644
--- a/modules/ingest-common/build.gradle
+++ b/modules/ingest-common/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources'
-  classname 'org.opensearch.ingest.common.IngestCommonModulePlugin'
+  description = 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources'
+  classname = 'org.opensearch.ingest.common.IngestCommonModulePlugin'
   extendedPlugins = ['lang-painless']
 }
 
diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle
index f74de1dc290dd..3f74690e3ef4f 100644
--- a/modules/ingest-geoip/build.gradle
+++ b/modules/ingest-geoip/build.gradle
@@ -34,8 +34,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database'
-  classname 'org.opensearch.ingest.geoip.IngestGeoIpModulePlugin'
+  description = 'Ingest processor that looks up geo data based on IP addresses using the Maxmind geo database'
+  classname = 'org.opensearch.ingest.geoip.IngestGeoIpModulePlugin'
 }
 
 dependencies {
diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle
index 187e72d192a3d..85206861ab5f2 100644
--- a/modules/ingest-user-agent/build.gradle
+++ b/modules/ingest-user-agent/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'Ingest processor that extracts information from a user agent'
-  classname 'org.opensearch.ingest.useragent.IngestUserAgentModulePlugin'
+  description = 'Ingest processor that extracts information from a user agent'
+  classname = 'org.opensearch.ingest.useragent.IngestUserAgentModulePlugin'
 }
 
 restResources {
diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle
index 94811cb608553..6efa3f3e667b5 100644
--- a/modules/lang-expression/build.gradle
+++ b/modules/lang-expression/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Lucene expressions integration for OpenSearch'
-  classname 'org.opensearch.script.expression.ExpressionModulePlugin'
+  description = 'Lucene expressions integration for OpenSearch'
+  classname = 'org.opensearch.script.expression.ExpressionModulePlugin'
 }
 
 dependencies {
diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle
index a836124f94b41..4aaaa9fea1c59 100644
--- a/modules/lang-mustache/build.gradle
+++ b/modules/lang-mustache/build.gradle
@@ -32,8 +32,8 @@ apply plugin: 'opensearch.java-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Mustache scripting integration for OpenSearch'
-  classname 'org.opensearch.script.mustache.MustacheModulePlugin'
+  description = 'Mustache scripting integration for OpenSearch'
+  classname = 'org.opensearch.script.mustache.MustacheModulePlugin'
   hasClientJar = true // For the template apis and query
 }
 
diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle
index ffb1fe6117c06..3895c512c61b4 100644
--- a/modules/lang-painless/build.gradle
+++ b/modules/lang-painless/build.gradle
@@ -36,8 +36,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'An easy, safe and fast scripting language for OpenSearch'
-  classname 'org.opensearch.painless.PainlessModulePlugin'
+  description = 'An easy, safe and fast scripting language for OpenSearch'
+  classname = 'org.opensearch.painless.PainlessModulePlugin'
 }
 
 ext {
diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle
index b16176ca5aa72..1867abafc79c8 100644
--- a/modules/mapper-extras/build.gradle
+++ b/modules/mapper-extras/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.java-rest-test'
 
 opensearchplugin {
-  description 'Adds advanced field mappers'
-  classname 'org.opensearch.index.mapper.MapperExtrasModulePlugin'
+  description = 'Adds advanced field mappers'
+  classname = 'org.opensearch.index.mapper.MapperExtrasModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/opensearch-dashboards/build.gradle b/modules/opensearch-dashboards/build.gradle
index 07453e1f70f1c..8c590a348a9c4 100644
--- a/modules/opensearch-dashboards/build.gradle
+++ b/modules/opensearch-dashboards/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.java-rest-test'
 
 opensearchplugin {
-  description 'Plugin exposing APIs for OpenSearch Dashboards system indices'
-  classname 'org.opensearch.dashboards.OpenSearchDashboardsModulePlugin'
+  description = 'Plugin exposing APIs for OpenSearch Dashboards system indices'
+  classname = 'org.opensearch.dashboards.OpenSearchDashboardsModulePlugin'
 }
 
 dependencies {
diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle
index d509e65106e7b..08b624ea4f3fa 100644
--- a/modules/parent-join/build.gradle
+++ b/modules/parent-join/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'This module adds the support parent-child queries and aggregations'
-  classname 'org.opensearch.join.ParentJoinModulePlugin'
+  description = 'This module adds support for parent-child queries and aggregations'
+  classname = 'org.opensearch.join.ParentJoinModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle
index 2312f7bda80b2..9669d1057fb41 100644
--- a/modules/percolator/build.gradle
+++ b/modules/percolator/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Percolator module adds capability to index queries and query these queries by specifying documents'
-  classname 'org.opensearch.percolator.PercolatorModulePlugin'
+  description = 'Percolator module adds capability to index queries and query these queries by specifying documents'
+  classname = 'org.opensearch.percolator.PercolatorModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle
index 4232d583dc984..f6946c631221d 100644
--- a/modules/rank-eval/build.gradle
+++ b/modules/rank-eval/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Rank Eval module adds APIs to evaluate ranking quality.'
-  classname 'org.opensearch.index.rankeval.RankEvalModulePlugin'
+  description = 'The Rank Eval module adds APIs to evaluate ranking quality.'
+  classname = 'org.opensearch.index.rankeval.RankEvalModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle
index cad7d67f3ef84..a44e1004d93ad 100644
--- a/modules/reindex/build.gradle
+++ b/modules/reindex/build.gradle
@@ -40,8 +40,8 @@ apply plugin: 'opensearch.java-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.'
-  classname 'org.opensearch.index.reindex.ReindexModulePlugin'
+  description = 'The Reindex module adds APIs to reindex from one index to another or update documents in place.'
+  classname = 'org.opensearch.index.reindex.ReindexModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle
index 7a697623eb8d9..49c3a12f23fe0 100644
--- a/modules/repository-url/build.gradle
+++ b/modules/repository-url/build.gradle
@@ -37,8 +37,8 @@ apply plugin: 'opensearch.internal-cluster-test'
 
 
 opensearchplugin {
-  description 'Module for URL repository'
-  classname 'org.opensearch.plugin.repository.url.URLRepositoryModulePlugin'
+  description = 'Module for URL repository'
+  classname = 'org.opensearch.plugin.repository.url.URLRepositoryModulePlugin'
 }
 
 restResources {
@@ -56,7 +56,7 @@ task urlFixture(type: AntFixture) {
   doFirst {
     repositoryDir.mkdirs()
   }
-  env 'CLASSPATH', "${-> project.sourceSets.test.runtimeClasspath.asPath}"
+  env 'CLASSPATH', "${-> sourceSets.test.runtimeClasspath.asPath}"
   executable = "${BuildParams.runtimeJavaHome}/bin/java"
   args 'org.opensearch.repositories.url.URLFixture', baseDir, "${repositoryDir.absolutePath}"
 }
diff --git a/modules/search-pipeline-common/build.gradle b/modules/search-pipeline-common/build.gradle
index 657392d884e97..4b6d579dc22e8 100644
--- a/modules/search-pipeline-common/build.gradle
+++ b/modules/search-pipeline-common/build.gradle
@@ -13,8 +13,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Module for search pipeline processors that do not require additional security permissions or have large dependencies and resources'
-  classname 'org.opensearch.search.pipeline.common.SearchPipelineCommonModulePlugin'
+  description = 'Module for search pipeline processors that do not require additional security permissions or have large dependencies and resources'
+  classname = 'org.opensearch.search.pipeline.common.SearchPipelineCommonModulePlugin'
   extendedPlugins = ['lang-painless']
 }
 
diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle
index 726092ffe4273..25a32616777b7 100644
--- a/modules/systemd/build.gradle
+++ b/modules/systemd/build.gradle
@@ -29,6 +29,6 @@
  */
 
 opensearchplugin {
-  description 'Integrates OpenSearch with systemd'
-  classname 'org.opensearch.systemd.SystemdModulePlugin'
+  description = 'Integrates OpenSearch with systemd'
+  classname = 'org.opensearch.systemd.SystemdModulePlugin'
 }
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index cdaf8350055f0..4e68a4ce17f73 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -49,8 +49,8 @@ apply plugin: 'opensearch.publish'
    * maybe figure out a way to run all tests from core with netty4/network?
  */
 opensearchplugin {
-  description 'Netty 4 based transport implementation'
-  classname 'org.opensearch.transport.Netty4ModulePlugin'
+  description = 'Netty 4 based transport implementation'
+  classname = 'org.opensearch.transport.Netty4ModulePlugin'
   hasClientJar = true
 }
 
diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle
index e5c084559f0a6..25e1587136d78 100644
--- a/plugins/analysis-icu/build.gradle
+++ b/plugins/analysis-icu/build.gradle
@@ -32,8 +32,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The ICU Analysis plugin integrates the Lucene ICU module into OpenSearch, adding ICU-related analysis components.'
-  classname 'org.opensearch.plugin.analysis.icu.AnalysisICUPlugin'
+  description = 'The ICU Analysis plugin integrates the Lucene ICU module into OpenSearch, adding ICU-related analysis components.'
+  classname = 'org.opensearch.plugin.analysis.icu.AnalysisICUPlugin'
   hasClientJar = true
 }
 
diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle
index 426b85f44bf55..5babcb2757f5e 100644
--- a/plugins/analysis-kuromoji/build.gradle
+++ b/plugins/analysis-kuromoji/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into opensearch.'
-  classname 'org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin'
+  description = 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into opensearch.'
+  classname = 'org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin'
 }
 
 dependencies {
diff --git a/plugins/analysis-nori/build.gradle b/plugins/analysis-nori/build.gradle
index 3def7f9c6c60f..41a73fb3895ef 100644
--- a/plugins/analysis-nori/build.gradle
+++ b/plugins/analysis-nori/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into opensearch.'
-  classname 'org.opensearch.plugin.analysis.nori.AnalysisNoriPlugin'
+  description = 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into opensearch.'
+  classname = 'org.opensearch.plugin.analysis.nori.AnalysisNoriPlugin'
 }
 
 dependencies {
diff --git a/plugins/analysis-phonenumber/build.gradle b/plugins/analysis-phonenumber/build.gradle
index c9913b36f8508..1e19167582e19 100644
--- a/plugins/analysis-phonenumber/build.gradle
+++ b/plugins/analysis-phonenumber/build.gradle
@@ -12,8 +12,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'Adds an analyzer for phone numbers to OpenSearch.'
-  classname 'org.opensearch.analysis.phone.PhoneNumberAnalysisPlugin'
+  description = 'Adds an analyzer for phone numbers to OpenSearch.'
+  classname = 'org.opensearch.analysis.phone.PhoneNumberAnalysisPlugin'
 }
 
 dependencies {
diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle
index ffa0466d43170..c0272b78c3db8 100644
--- a/plugins/analysis-phonetic/build.gradle
+++ b/plugins/analysis-phonetic/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'The Phonetic Analysis plugin integrates phonetic token filter analysis with opensearch.'
-  classname 'org.opensearch.plugin.analysis.AnalysisPhoneticPlugin'
+  description = 'The Phonetic Analysis plugin integrates phonetic token filter analysis with opensearch.'
+  classname = 'org.opensearch.plugin.analysis.AnalysisPhoneticPlugin'
 }
 
 dependencies {
diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle
index d74d314ab0673..448a3a5e0a637 100644
--- a/plugins/analysis-smartcn/build.gradle
+++ b/plugins/analysis-smartcn/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into opensearch.'
-  classname 'org.opensearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin'
+  description = 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into opensearch.'
+  classname = 'org.opensearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin'
 }
 
 dependencies {
diff --git a/plugins/analysis-stempel/build.gradle b/plugins/analysis-stempel/build.gradle
index d713f80172c58..90523ae2d9d95 100644
--- a/plugins/analysis-stempel/build.gradle
+++ b/plugins/analysis-stempel/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into opensearch.'
-  classname 'org.opensearch.plugin.analysis.stempel.AnalysisStempelPlugin'
+  description = 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into opensearch.'
+  classname = 'org.opensearch.plugin.analysis.stempel.AnalysisStempelPlugin'
 }
 
 dependencies {
diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle
index 6122c055c788e..7e760423438c1 100644
--- a/plugins/analysis-ukrainian/build.gradle
+++ b/plugins/analysis-ukrainian/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into opensearch.'
-  classname 'org.opensearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin'
+  description = 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into opensearch.'
+  classname = 'org.opensearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin'
 }
 
 dependencies {
diff --git a/plugins/build.gradle b/plugins/build.gradle
index 4e6de2c120d35..6c7fb749d08ac 100644
--- a/plugins/build.gradle
+++ b/plugins/build.gradle
@@ -39,9 +39,9 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
 
   opensearchplugin {
     // for local ES plugins, the name of the plugin is the same as the directory
-    name project.name
+    name = project.name
 
-    licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-    noticeFile rootProject.file('NOTICE.txt')
+    licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+    noticeFile = rootProject.file('NOTICE.txt')
   }
 }
diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle
index 5747624e2fb69..6390b045db8ea 100644
--- a/plugins/cache-ehcache/build.gradle
+++ b/plugins/cache-ehcache/build.gradle
@@ -14,8 +14,8 @@ import org.opensearch.gradle.info.BuildParams
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Ehcache based cache implementation.'
-  classname 'org.opensearch.cache.EhcacheCachePlugin'
+  description = 'Ehcache based cache implementation.'
+  classname = 'org.opensearch.cache.EhcacheCachePlugin'
 }
 
 versions << [
diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle
index c4a8609b6df48..fa63a4a7153d3 100644
--- a/plugins/crypto-kms/build.gradle
+++ b/plugins/crypto-kms/build.gradle
@@ -16,8 +16,8 @@ apply plugin: 'opensearch.publish'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'AWS KMS plugin to provide crypto keys'
-  classname 'org.opensearch.crypto.kms.CryptoKmsPlugin'
+  description = 'AWS KMS plugin to provide crypto keys'
+  classname = 'org.opensearch.crypto.kms.CryptoKmsPlugin'
 }
 
 ext {
diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle
index 7f34cec94499c..2627b3061bdf2 100644
--- a/plugins/discovery-azure-classic/build.gradle
+++ b/plugins/discovery-azure-classic/build.gradle
@@ -35,8 +35,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism'
-  classname 'org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin'
+  description = 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism'
+  classname = 'org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin'
 }
 
 versions << [
diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle
index 9c9f64f09b915..8d615e0bf8d9d 100644
--- a/plugins/discovery-ec2/build.gradle
+++ b/plugins/discovery-ec2/build.gradle
@@ -34,8 +34,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.'
-  classname 'org.opensearch.discovery.ec2.Ec2DiscoveryPlugin'
+  description = 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.'
+  classname = 'org.opensearch.discovery.ec2.Ec2DiscoveryPlugin'
 }
 
 dependencies {
diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle
index a844576d67ece..41c423c57ba36 100644
--- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle
+++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle
@@ -76,8 +76,8 @@ yamlRestTest.enabled = false
  */
 ['KeyStore', 'EnvVariables', 'SystemProperties', 'ContainerCredentials', 'InstanceProfile'].forEach { action ->
   AntFixture fixture = tasks.create(name: "ec2Fixture${action}", type: AntFixture) {
-    dependsOn project.sourceSets.yamlRestTest.runtimeClasspath
-    env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}"
+    dependsOn sourceSets.yamlRestTest.runtimeClasspath
+    env 'CLASSPATH', "${-> sourceSets.yamlRestTest.runtimeClasspath.asPath}"
     executable = "${BuildParams.runtimeJavaHome}/bin/java"
     args 'org.opensearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/yamlRestTest${action}-1/config/unicast_hosts.txt"
   }
@@ -85,7 +85,7 @@ yamlRestTest.enabled = false
   tasks.create(name: "yamlRestTest${action}", type: RestIntegTestTask) {
     dependsOn fixture
   }
-  SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
+  SourceSetContainer sourceSets = getExtensions().getByType(SourceSetContainer.class);
   SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME)
   "yamlRestTest${action}" {
     setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs())
diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle
index 3214db2074198..a9338bfc43a2c 100644
--- a/plugins/discovery-gce/build.gradle
+++ b/plugins/discovery-gce/build.gradle
@@ -13,8 +13,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.'
-  classname 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin'
+  description = 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.'
+  classname = 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin'
 }
 
 dependencies {
@@ -52,9 +52,10 @@ check {
   dependsOn 'qa:gce:check'
 }
 
+def name = project.name
 test {
   // this is needed for insecure plugins, remove if possible!
-  systemProperty 'tests.artifact', project.name
+  systemProperty 'tests.artifact', name
 }
 
 thirdPartyAudit {
diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle
index 841cd396a8bcf..562ec4e1db482 100644
--- a/plugins/discovery-gce/qa/gce/build.gradle
+++ b/plugins/discovery-gce/qa/gce/build.gradle
@@ -51,8 +51,8 @@ restResources {
 
 /** A task to start the GCEFixture which emulates a GCE service **/
 task gceFixture(type: AntFixture) {
-  dependsOn project.sourceSets.yamlRestTest.runtimeClasspath
-  env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}"
+  dependsOn sourceSets.yamlRestTest.runtimeClasspath
+  env 'CLASSPATH', "${-> sourceSets.yamlRestTest.runtimeClasspath.asPath}"
   executable = "${BuildParams.runtimeJavaHome}/bin/java"
   args 'org.opensearch.cloud.gce.GCEFixture', baseDir, "${buildDir}/testclusters/yamlRestTest-1/config/unicast_hosts.txt"
 }
diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle
index 5b35d887b3db1..c83e710283322 100644
--- a/plugins/examples/custom-settings/build.gradle
+++ b/plugins/examples/custom-settings/build.gradle
@@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  name 'custom-settings'
-  description 'An example plugin showing how to register custom settings'
-  classname 'org.opensearch.example.customsettings.ExampleCustomSettingsPlugin'
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  name = 'custom-settings'
+  description = 'An example plugin showing how to register custom settings'
+  classname = 'org.opensearch.example.customsettings.ExampleCustomSettingsPlugin'
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
 
 testClusters.all {
diff --git a/plugins/examples/custom-significance-heuristic/build.gradle b/plugins/examples/custom-significance-heuristic/build.gradle
index ab013657fed23..72efbaafad8e3 100644
--- a/plugins/examples/custom-significance-heuristic/build.gradle
+++ b/plugins/examples/custom-significance-heuristic/build.gradle
@@ -31,9 +31,9 @@ apply plugin: 'opensearch.opensearchplugin'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  name 'custom-significance-heuristic'
-  description 'An example plugin showing how to write and register a custom significance heuristic'
-  classname 'org.opensearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin'
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  name = 'custom-significance-heuristic'
+  description = 'An example plugin showing how to write and register a custom significance heuristic'
+  classname = 'org.opensearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin'
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle
index d60523306b3c1..977cad7d1452e 100644
--- a/plugins/examples/custom-suggester/build.gradle
+++ b/plugins/examples/custom-suggester/build.gradle
@@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  name 'custom-suggester'
-  description 'An example plugin showing how to write and register a custom suggester'
-  classname 'org.opensearch.example.customsuggester.CustomSuggesterPlugin'
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  name = 'custom-suggester'
+  description = 'An example plugin showing how to write and register a custom suggester'
+  classname = 'org.opensearch.example.customsuggester.CustomSuggesterPlugin'
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
 
 testClusters.all {
diff --git a/plugins/examples/painless-allowlist/build.gradle b/plugins/examples/painless-allowlist/build.gradle
index 99722126dd171..d8b4c15536a75 100644
--- a/plugins/examples/painless-allowlist/build.gradle
+++ b/plugins/examples/painless-allowlist/build.gradle
@@ -31,12 +31,12 @@ apply plugin: 'opensearch.opensearchplugin'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  name 'painless-allowlist'
-  description 'An example allowlisting additional classes and methods in painless'
-  classname 'org.opensearch.example.painlessallowlist.MyAllowlistPlugin'
+  name = 'painless-allowlist'
+  description = 'An example allowlisting additional classes and methods in painless'
+  classname = 'org.opensearch.example.painlessallowlist.MyAllowlistPlugin'
   extendedPlugins = ['lang-painless']
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
 
 dependencies {
diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle
index b33d79395d92b..ad450798514ea 100644
--- a/plugins/examples/rescore/build.gradle
+++ b/plugins/examples/rescore/build.gradle
@@ -31,9 +31,9 @@ apply plugin: 'opensearch.opensearchplugin'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  name 'example-rescore'
-  description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore'
-  classname 'org.opensearch.example.rescore.ExampleRescorePlugin'
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  name = 'example-rescore'
+  description = 'An example plugin implementing rescore and verifying that plugins *can* implement rescore'
+  classname = 'org.opensearch.example.rescore.ExampleRescorePlugin'
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle
index b97d091af9d08..c3c25b4b0a841 100644
--- a/plugins/examples/rest-handler/build.gradle
+++ b/plugins/examples/rest-handler/build.gradle
@@ -35,11 +35,11 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.java-rest-test'
 
 opensearchplugin {
-  name 'rest-handler'
-  description 'An example plugin showing how to register a REST handler'
-  classname 'org.opensearch.example.resthandler.ExampleRestHandlerPlugin'
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  name = 'rest-handler'
+  description = 'An example plugin showing how to register a REST handler'
+  classname = 'org.opensearch.example.resthandler.ExampleRestHandlerPlugin'
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
 
 // No unit tests in this example
@@ -47,7 +47,7 @@ test.enabled = false
 
 tasks.register("exampleFixture", org.opensearch.gradle.test.AntFixture) {
   dependsOn sourceSets.javaRestTest.runtimeClasspath
-  env 'CLASSPATH', "${-> project.sourceSets.javaRestTest.runtimeClasspath.asPath}"
+  env 'CLASSPATH', "${-> sourceSets.javaRestTest.runtimeClasspath.asPath}"
   executable = "${BuildParams.runtimeJavaHome}/bin/java"
   args 'org.opensearch.example.resthandler.ExampleFixture', baseDir, 'TEST'
 }
diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle
index e4ddd97abbe4c..1a880e80d2e49 100644
--- a/plugins/examples/script-expert-scoring/build.gradle
+++ b/plugins/examples/script-expert-scoring/build.gradle
@@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin'
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  name 'script-expert-scoring'
-  description 'An example script engine to use low level Lucene internals for expert scoring'
-  classname 'org.opensearch.example.expertscript.ExpertScriptPlugin'
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  name = 'script-expert-scoring'
+  description = 'An example script engine to use low level Lucene internals for expert scoring'
+  classname = 'org.opensearch.example.expertscript.ExpertScriptPlugin'
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
 
 test.enabled = false
diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle
index 222443efcb214..2ea3e8e6b1e50 100644
--- a/plugins/identity-shiro/build.gradle
+++ b/plugins/identity-shiro/build.gradle
@@ -9,11 +9,11 @@
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Plugin for identity features in OpenSearch.'
-  classname 'org.opensearch.identity.shiro.ShiroIdentityPlugin'
-  name project.name
-  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-  noticeFile rootProject.file('NOTICE.txt')
+  description = 'Plugin for identity features in OpenSearch.'
+  classname = 'org.opensearch.identity.shiro.ShiroIdentityPlugin'
+  name = project.name
+  licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile = rootProject.file('NOTICE.txt')
 }
 
 dependencies {
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index 2948ca12904f5..e0ad602266602 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -33,8 +33,8 @@ import org.opensearch.gradle.info.BuildParams
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'Ingest processor that uses Apache Tika to extract contents'
-  classname 'org.opensearch.ingest.attachment.IngestAttachmentPlugin'
+  description = 'Ingest processor that uses Apache Tika to extract contents'
+  classname = 'org.opensearch.ingest.attachment.IngestAttachmentPlugin'
 }
 
 versions << [
diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle
index 5ff3bbe37810b..c7bc5b795ed71 100644
--- a/plugins/mapper-annotated-text/build.gradle
+++ b/plugins/mapper-annotated-text/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.'
-  classname 'org.opensearch.plugin.mapper.AnnotatedTextPlugin'
+  description = 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.'
+  classname = 'org.opensearch.plugin.mapper.AnnotatedTextPlugin'
 }
 
 restResources {
diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle
index 67006f29b7565..42e27d7b3908a 100644
--- a/plugins/mapper-murmur3/build.gradle
+++ b/plugins/mapper-murmur3/build.gradle
@@ -30,8 +30,8 @@
 apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
-  description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.'
-  classname 'org.opensearch.plugin.mapper.MapperMurmur3Plugin'
+  description = 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.'
+  classname = 'org.opensearch.plugin.mapper.MapperMurmur3Plugin'
 }
 
 restResources {
diff --git a/plugins/mapper-size/build.gradle b/plugins/mapper-size/build.gradle
index fb4f7c4e00c4f..8c6caaf09e01a 100644
--- a/plugins/mapper-size/build.gradle
+++ b/plugins/mapper-size/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Mapper Size plugin allows document to record their uncompressed size at index time.'
-  classname 'org.opensearch.plugin.mapper.MapperSizePlugin'
+  description = 'The Mapper Size plugin allows document to record their uncompressed size at index time.'
+  classname = 'org.opensearch.plugin.mapper.MapperSizePlugin'
 }
 
 restResources {
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index ad12ec9003e64..c6b303f22112e 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -39,8 +39,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Azure Repository plugin adds support for Azure storage repositories.'
-  classname 'org.opensearch.repositories.azure.AzureRepositoryPlugin'
+  description = 'The Azure Repository plugin adds support for Azure storage repositories.'
+  classname = 'org.opensearch.repositories.azure.AzureRepositoryPlugin'
 }
 
 dependencies {
diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index 97ae88aac5485..d4c870e1ca2b2 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -43,8 +43,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The GCS repository plugin adds Google Cloud Storage support for repositories.'
-  classname 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin'
+  description = 'The GCS repository plugin adds Google Cloud Storage support for repositories.'
+  classname = 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin'
 }
 
 dependencies {
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index faa9b2bfff84d..441c6ae998406 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -43,8 +43,8 @@ apply plugin: 'opensearch.rest-resources'
 apply plugin: 'opensearch.rest-test'
 
 opensearchplugin {
-  description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.'
-  classname 'org.opensearch.repositories.hdfs.HdfsPlugin'
+  description = 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.'
+  classname = 'org.opensearch.repositories.hdfs.HdfsPlugin'
 }
 
 versions << [
@@ -133,11 +133,11 @@ project(':test:fixtures:krb5kdc-fixture').tasks.preProcessFixture {
 
 // Create HDFS File System Testing Fixtures for HA/Secure combinations
 for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) {
-  def tsk = project.tasks.register(fixtureName, org.opensearch.gradle.test.AntFixture) {
-    dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
+  def tsk = tasks.register(fixtureName, org.opensearch.gradle.test.AntFixture) {
+    dependsOn configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
     executable = "${BuildParams.runtimeJavaHome}/bin/java"
-    env 'CLASSPATH', "${-> project.configurations.hdfsFixture.asPath}"
-    maxWaitInSeconds 60
+    env 'CLASSPATH', "${-> configurations.hdfsFixture.asPath}"
+    maxWaitInSeconds = 60
     onlyIf { BuildParams.inFipsJvm == false }
     waitCondition = { fixture, ant ->
       // the hdfs.MiniHDFS fixture writes the ports file when
@@ -187,7 +187,7 @@ Set disabledIntegTestTaskNames = []
 for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
   task "${integTestTaskName}"(type: RestIntegTestTask) {
     description = "Runs rest tests against an opensearch cluster with HDFS."
-    dependsOn(project.bundlePlugin)
+    dependsOn(bundlePlugin)
 
     if (disabledIntegTestTaskNames.contains(integTestTaskName)) {
       enabled = false;
diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index 398611a016ed2..6e84edddcc252 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -41,8 +41,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The S3 repository plugin adds S3 repositories'
-  classname 'org.opensearch.repositories.s3.S3RepositoryPlugin'
+  description = 'The S3 repository plugin adds S3 repositories'
+  classname = 'org.opensearch.repositories.s3.S3RepositoryPlugin'
 }
 
 dependencies {
diff --git a/plugins/store-smb/build.gradle b/plugins/store-smb/build.gradle
index add4abb22329f..d702978730f45 100644
--- a/plugins/store-smb/build.gradle
+++ b/plugins/store-smb/build.gradle
@@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The Store SMB plugin adds support for SMB stores.'
-  classname 'org.opensearch.plugin.store.smb.SMBStorePlugin'
+  description = 'The Store SMB plugin adds support for SMB stores.'
+  classname = 'org.opensearch.plugin.store.smb.SMBStorePlugin'
 }
 restResources {
   restApi {
diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle
index 872d928aa093f..3aba7d64cd96d 100644
--- a/plugins/telemetry-otel/build.gradle
+++ b/plugins/telemetry-otel/build.gradle
@@ -14,8 +14,8 @@ import org.opensearch.gradle.info.BuildParams
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'Opentelemetry based telemetry implementation.'
-  classname 'org.opensearch.telemetry.OTelTelemetryPlugin'
+  description = 'Opentelemetry based telemetry implementation.'
+  classname = 'org.opensearch.telemetry.OTelTelemetryPlugin'
   hasClientJar = false
 }
 
diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle
index 47f62b2b8c3f3..5c6bc8efe1098 100644
--- a/plugins/transport-grpc/build.gradle
+++ b/plugins/transport-grpc/build.gradle
@@ -9,8 +9,8 @@ import org.gradle.api.attributes.java.TargetJvmEnvironment
  */
 
 opensearchplugin {
-  description 'gRPC based transport implementation'
-  classname 'org.opensearch.transport.grpc.GrpcPlugin'
+  description = 'gRPC based transport implementation'
+  classname = 'org.opensearch.transport.grpc.GrpcPlugin'
 }
 
 dependencies {
diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle
index 7132c97864238..6ac27b51f8902 100644
--- a/plugins/transport-nio/build.gradle
+++ b/plugins/transport-nio/build.gradle
@@ -34,8 +34,8 @@ apply plugin: "opensearch.publish"
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'The nio transport.'
-  classname 'org.opensearch.transport.nio.NioTransportPlugin'
+  description = 'The nio transport.'
+  classname = 'org.opensearch.transport.nio.NioTransportPlugin'
   hasClientJar = true
 }
 
diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle
index 1e76d1a29efc1..12ae5ce99632e 100644
--- a/plugins/transport-reactor-netty4/build.gradle
+++ b/plugins/transport-reactor-netty4/build.gradle
@@ -23,8 +23,8 @@ apply plugin: 'opensearch.internal-cluster-test'
 apply plugin: 'opensearch.publish'
 
 opensearchplugin {
-  description 'Reactor Netty 4 based transport implementation'
-  classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin'
+  description = 'Reactor Netty 4 based transport implementation'
+  classname = 'org.opensearch.transport.reactor.ReactorNetty4Plugin'
   hasClientJar = true
 }
 
diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle
index ad6737bbd24b0..2e8b0df468092 100644
--- a/plugins/workload-management/build.gradle
+++ b/plugins/workload-management/build.gradle
@@ -14,8 +14,8 @@ apply plugin: 'opensearch.java-rest-test'
 apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
-  description 'OpenSearch Workload Management Plugin.'
-  classname 'org.opensearch.plugin.wlm.WorkloadManagementPlugin'
+  description = 'OpenSearch Workload Management Plugin.'
+  classname = 'org.opensearch.plugin.wlm.WorkloadManagementPlugin'
 }
 
 dependencies {
diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle
index db8762fe921bf..a3e5f295001bc 100644
--- a/qa/die-with-dignity/build.gradle
+++ b/qa/die-with-dignity/build.gradle
@@ -16,8 +16,8 @@ apply plugin: 'opensearch.java-rest-test'
 apply plugin: 'opensearch.opensearchplugin'
 
 opensearchplugin {
-  description 'Die with dignity plugin'
-  classname 'org.opensearch.DieWithDignityPlugin'
+  description = 'Die with dignity plugin'
+  classname = 'org.opensearch.DieWithDignityPlugin'
 }
 
 // let the javaRestTest see the classpath of main
diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle
index 82aa4cd511ef1..4b04fcea872b0 100644
--- a/qa/full-cluster-restart/build.gradle
+++ b/qa/full-cluster-restart/build.gradle
@@ -52,7 +52,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) {
   }
 
   tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     mustRunAfter(precommit)
     doFirst {
       delete("${buildDir}/cluster/shared/repo/${baseName}")
@@ -62,7 +62,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) {
   }
 
   tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     dependsOn "${baseName}#oldClusterTest"
     doFirst {
       testClusters."${baseName}".goToNextVersion()
diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle
index 822977c55368a..9148f5a3ba3e6 100644
--- a/qa/mixed-cluster/build.gradle
+++ b/qa/mixed-cluster/build.gradle
@@ -69,7 +69,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) {
   }
 
   tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     mustRunAfter(precommit)
     doFirst {
       delete("${buildDir}/cluster/shared/repo/${baseName}")
diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle
index 907791bd6a7de..a0a271fa01fb3 100644
--- a/qa/multi-cluster-search/build.gradle
+++ b/qa/multi-cluster-search/build.gradle
@@ -49,7 +49,7 @@ testClusters.'remote-cluster' {
 }
 
 task mixedClusterTest(type: RestIntegTestTask) {
-    useCluster testClusters.'remote-cluster'
+    useCluster project, testClusters.'remote-cluster'
     dependsOn 'remote-cluster'
     systemProperty 'tests.rest.suite', 'multi_cluster'
 }
diff --git a/qa/remote-clusters/build.gradle b/qa/remote-clusters/build.gradle
index 2f3cd9d2d898d..a52d4f2035bea 100644
--- a/qa/remote-clusters/build.gradle
+++ b/qa/remote-clusters/build.gradle
@@ -59,7 +59,7 @@ tasks.named("preProcessFixture").configure {
   }
   doLast {
     // tests expect to have an empty repo
-    project.delete(
+    delete(
       "${buildDir}/repo"
     )
     createAndSetWritable(
diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle
index 67710095d30bc..2bf18d02254ae 100644
--- a/qa/repository-multi-version/build.gradle
+++ b/qa/repository-multi-version/build.gradle
@@ -59,7 +59,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) {
   }
 
   tasks.register("${baseName}#Step1OldClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${oldClusterName}"
+    useCluster project, testClusters."${oldClusterName}"
     mustRunAfter(precommit)
     doFirst {
       delete("${buildDir}/cluster/shared/repo/${baseName}")
@@ -68,19 +68,19 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) {
   }
 
   tasks.register("${baseName}#Step2NewClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${newClusterName}"
+    useCluster project, testClusters."${newClusterName}"
     dependsOn "${baseName}#Step1OldClusterTest"
     systemProperty 'tests.rest.suite', 'step2'
   }
 
   tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${oldClusterName}"
+    useCluster project, testClusters."${oldClusterName}"
     dependsOn "${baseName}#Step2NewClusterTest"
     systemProperty 'tests.rest.suite', 'step3'
   }
 
   tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${newClusterName}"
+    useCluster project, testClusters."${newClusterName}"
     dependsOn "${baseName}#Step3OldClusterTest"
     systemProperty 'tests.rest.suite', 'step4'
   }
diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle
index 3dff452be855f..ffcf815bfa264 100644
--- a/qa/rolling-upgrade/build.gradle
+++ b/qa/rolling-upgrade/build.gradle
@@ -67,7 +67,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) {
 
   tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) {
     dependsOn processTestResources
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     mustRunAfter(precommit)
     doFirst {
       delete("${buildDir}/cluster/shared/repo/${baseName}")
@@ -80,7 +80,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) {
 
   tasks.register("${baseName}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) {
     dependsOn "${baseName}#oldClusterTest"
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     doFirst {
       testClusters."${baseName}".nextNodeToNextVersion()
     }
@@ -93,7 +93,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) {
 
   tasks.register("${baseName}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) {
     dependsOn "${baseName}#oneThirdUpgradedTest"
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     doFirst {
       testClusters."${baseName}".nextNodeToNextVersion()
     }
@@ -109,7 +109,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) {
     doFirst {
       testClusters."${baseName}".nextNodeToNextVersion()
     }
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     systemProperty 'tests.rest.suite', 'upgraded_cluster'
     systemProperty 'tests.upgrade_from_version', bwcVersionStr
 
diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle
index 25261f5e3ff7d..af389a7c59835 100644
--- a/qa/smoke-test-multinode/build.gradle
+++ b/qa/smoke-test-multinode/build.gradle
@@ -47,7 +47,7 @@ testClusters.integTest {
 
 integTest {
   doFirst {
-    project.delete(repo)
+    delete(repo)
     repo.mkdirs()
   }
 }
diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle
index 8b0dd20899862..18e4b5b549579 100644
--- a/qa/verify-version-constants/build.gradle
+++ b/qa/verify-version-constants/build.gradle
@@ -48,7 +48,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) {
   }
 
   tasks.register("${baseName}#integTest", StandaloneRestIntegTestTask) {
-    useCluster testClusters."${baseName}"
+    useCluster project, testClusters."${baseName}"
     nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}")
     nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}")
   }
diff --git a/sandbox/plugins/build.gradle b/sandbox/plugins/build.gradle
index 61afb2c568e1b..1b7b6889972fd 100644
--- a/sandbox/plugins/build.gradle
+++ b/sandbox/plugins/build.gradle
@@ -12,8 +12,8 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
   apply plugin: 'opensearch.opensearchplugin'
 
   opensearchplugin {
-    name project.name
-    licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-    noticeFile rootProject.file('NOTICE.txt')
+    name = project.name
+    licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+    noticeFile = rootProject.file('NOTICE.txt')
   }
 }
diff --git a/server/build.gradle b/server/build.gradle
index 8dd23491ccd69..6559c7247200a 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -42,7 +42,7 @@ plugins {
 publishing {
   publications {
     nebula(MavenPublication) {
-      artifactId 'opensearch'
+      artifactId = 'opensearch'
     }
   }
 }
diff --git a/test/external-modules/build.gradle b/test/external-modules/build.gradle
index 8e59c309826e7..e575323b6248c 100644
--- a/test/external-modules/build.gradle
+++ b/test/external-modules/build.gradle
@@ -17,9 +17,9 @@ subprojects {
   apply plugin: 'opensearch.yaml-rest-test'
 
   opensearchplugin {
-    name it.name
-    licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
-    noticeFile rootProject.file('NOTICE.txt')
+    name = it.name
+    licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+    noticeFile = rootProject.file('NOTICE.txt')
   }
 
   tasks.named('yamlRestTest').configure {
diff --git a/test/external-modules/delayed-aggs/build.gradle b/test/external-modules/delayed-aggs/build.gradle
index d470269c8a6e2..a7662f72e64e6 100644
--- a/test/external-modules/delayed-aggs/build.gradle
+++ b/test/external-modules/delayed-aggs/build.gradle
@@ -29,8 +29,8 @@
  */
 
 opensearchplugin {
-  description 'A test module that allows to delay aggregations on shards with a configurable time'
-  classname 'org.opensearch.search.aggregations.DelayedShardAggregationPlugin'
+  description = 'A test module that allows to delay aggregations on shards with a configurable time'
+  classname = 'org.opensearch.search.aggregations.DelayedShardAggregationPlugin'
 }
 
 restResources {
diff --git a/test/fixtures/azure-fixture/build.gradle b/test/fixtures/azure-fixture/build.gradle
index e2b1d475fbab7..904297a3b4c65 100644
--- a/test/fixtures/azure-fixture/build.gradle
+++ b/test/fixtures/azure-fixture/build.gradle
@@ -46,7 +46,7 @@ preProcessFixture {
   }
   doLast {
     file("${testFixturesDir}/shared").mkdirs()
-    project.copy {
+    copy {
       from jar
       from configurations.runtimeClasspath
       into "${testFixturesDir}/shared"
diff --git a/test/fixtures/gcs-fixture/build.gradle b/test/fixtures/gcs-fixture/build.gradle
index 564cf33687436..60f672e6bd00b 100644
--- a/test/fixtures/gcs-fixture/build.gradle
+++ b/test/fixtures/gcs-fixture/build.gradle
@@ -46,7 +46,7 @@ preProcessFixture {
   }
   doLast {
     file("${testFixturesDir}/shared").mkdirs()
-    project.copy {
+    copy {
       from jar
       from configurations.runtimeClasspath
       into "${testFixturesDir}/shared"
diff --git a/test/fixtures/s3-fixture/build.gradle b/test/fixtures/s3-fixture/build.gradle
index 86456b3364c4c..519e8514af4d4 100644
--- a/test/fixtures/s3-fixture/build.gradle
+++ b/test/fixtures/s3-fixture/build.gradle
@@ -46,7 +46,7 @@ preProcessFixture {
   }
   doLast {
     file("${testFixturesDir}/shared").mkdirs()
-    project.copy {
+    copy {
       from jar
       from configurations.runtimeClasspath
       into "${testFixturesDir}/shared"

From 4d943993ac93e1a140c1b58c11e812a58578f27d Mon Sep 17 00:00:00 2001
From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com>
Date: Fri, 10 Jan 2025 19:36:47 +0100
Subject: [PATCH 35/61] `phone-search` analyzer: don't emit sip/tel prefix,
 int'l prefix, extension & unformatted input (#16993)

* `phone-search` analyzer: don't emit int'l prefix

this was an oversight in the initial implementation: if the tokenizer
emits the international calling prefix in the search analyzer then all
documents with the same international calling prefix will match.

e.g. when searching for `+1-555-123-4567`, not only would documents with
this number match, but so would any other document with a `1` token (i.e.
any other number with this prefix).

thus the search functionality is currently broken for this analyzer,
making it useless.

the test coverage has now been extended to cover these and other
use-cases.
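
For illustration, the corresponding expectation from PhoneNumberAnalyzerTests in
this patch now pins down the search analyzer's output exactly; the previous test
merely asserted that the bare country code "41" (among other tokens) was included:

    // previously: assertTokensInclude(phoneSearchAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010"));
    // now only the full number remains in the search analyzer's output:
    assertTokensAreInAnyOrder(phoneSearchAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010"));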

Signed-off-by: Ralph Ursprung <Ralph.Ursprung@avaloq.com>

* `phone-search` analyzer: don't emit extension & unformatted input

emitting these tokens meant that phone numbers with other
international dialling prefixes still matched.

e.g. searching for `+1 1234` would also match a number stored as
`+2 1234`, which was wrong.

the tokens still need to be emitted for the `phone` analyzer: e.g. when
the user only enters the extension / local number it should still match.
The same goes for the other ngrams: these are needed for
search-as-you-type style queries where the user input needs to match
against partial phone numbers.
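
Concretely, in PhoneNumberTermTokenizer#getTokens() these extra tokens are now
gated behind the addNgrams flag, which is only set for the index-time `phone`
analyzer; condensed from the change below:

    // the full number is always emitted, for both analyzers
    tokens.add(countryCode.get() + input);
    if (addNgrams) {
        // country code, extension and the unformatted national number are
        // index-time-only tokens; the search analyzer skips them
        tokens.add(countryCode.get());
        if (!Strings.isEmpty(numberProto.getExtension())) {
            tokens.add(numberProto.getExtension());
        }
        tokens.add(input);
    }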

Signed-off-by: Ralph Ursprung <Ralph.Ursprung@avaloq.com>

* `phone-search` analyzer: don't emit sip/tel prefix

in line with the previous two commits, this is something else the search
analyzer shouldn't emit since otherwise searching for any number with
such a prefix will match _any_ document with the same prefix.
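
Again taken from the updated tests: with a sip/tel prefix the search analyzer now
only keeps the exact input and the parsed full number, instead of also emitting
"tel:", the country code and the national number:

    assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+441344840400", Arrays.asList("tel:+441344840400", "441344840400"));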

Signed-off-by: Ralph Ursprung <Ralph.Ursprung@avaloq.com>

---------

Signed-off-by: Ralph Ursprung <Ralph.Ursprung@avaloq.com>
---
 CHANGELOG.md                                  |   1 +
 .../phone/PhoneNumberTermTokenizer.java       |  23 ++-
 .../phone/PhoneNumberAnalyzerTests.java       |  18 +--
 .../test/analysis-phone/20_search.yml         | 139 ++++++++++++++++++
 4 files changed, 166 insertions(+), 15 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 512ba48941c87..a57561da861ee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -92,6 +92,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
 - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868))
 - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732))
+- The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993))
 
 ### Security
 
diff --git a/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java b/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java
index 6b95594204eb4..e0541755a2b3e 100644
--- a/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java
+++ b/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java
@@ -98,7 +98,9 @@ private Set<String> getTokens() throws IOException {
 
         // Rip off the "tel:" or "sip:" prefix
         if (input.indexOf("tel:") == 0 || input.indexOf("sip:") == 0) {
-            tokens.add(input.substring(0, 4));
+            if (addNgrams) {
+                tokens.add(input.substring(0, 4));
+            }
             input = input.substring(4);
         }
 
@@ -128,14 +130,23 @@ private Set<String> getTokens() throws IOException {
                 countryCode = Optional.of(String.valueOf(numberProto.getCountryCode()));
                 input = String.valueOf(numberProto.getNationalNumber());
 
-                // Add Country code, extension, and the number as tokens
-                tokens.add(countryCode.get());
+                // add full number as tokens
                 tokens.add(countryCode.get() + input);
-                if (!Strings.isEmpty(numberProto.getExtension())) {
-                    tokens.add(numberProto.getExtension());
+
+                if (addNgrams) {
+                    // Consider the country code as an ngram - it makes no sense in the search analyzer as it'd match all values with the
+                    // same country code
+                    tokens.add(countryCode.get());
+
+                    // Add extension without country code (not done for search analyzer as that might match numbers in other countries as
+                    // well!)
+                    if (!Strings.isEmpty(numberProto.getExtension())) {
+                        tokens.add(numberProto.getExtension());
+                    }
+                    // Add unformatted input (most likely the same as the extension now since the prefix has been removed)
+                    tokens.add(input);
                 }
 
-                tokens.add(input);
             }
         } catch (final NumberParseException | StringIndexOutOfBoundsException e) {
             // Libphone didn't like it, no biggie. We'll just ngram the number as it is.
diff --git a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java
index 332f6d21f47d6..d55c0b2ce7d2a 100644
--- a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java
+++ b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java
@@ -87,11 +87,7 @@ public void testEuropeDetailled() throws IOException {
      * Test for all tokens which are emitted by the "phone" analyzer.
      */
     public void testEuropeDetailledSearch() throws IOException {
-        assertTokensAreInAnyOrder(
-            phoneSearchAnalyzer,
-            "tel:+441344840400",
-            Arrays.asList("tel:+441344840400", "tel:", "441344840400", "44", "1344840400")
-        );
+        assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+441344840400", Arrays.asList("tel:+441344840400", "441344840400"));
     }
 
     public void testEurope() throws IOException {
@@ -166,6 +162,10 @@ public void testTelPrefix() throws IOException {
         assertTokensInclude("tel:+1228", Arrays.asList("1228", "122", "228"));
     }
 
+    public void testTelPrefixSearch() throws IOException {
+        assertTokensInclude("tel:+1228", Arrays.asList("1228"));
+    }
+
     public void testNumberPrefix() throws IOException {
         assertTokensInclude("+1228", Arrays.asList("1228", "122", "228"));
     }
@@ -189,21 +189,21 @@ public void testLocalNumberWithCH() throws IOException {
     }
 
     public void testSearchInternationalPrefixWithZZ() throws IOException {
-        assertTokensInclude(phoneSearchAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010"));
+        assertTokensAreInAnyOrder(phoneSearchAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010"));
     }
 
     public void testSearchInternationalPrefixWithCH() throws IOException {
-        assertTokensInclude(phoneSearchCHAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010"));
+        assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010"));
     }
 
     public void testSearchNationalPrefixWithCH() throws IOException {
         // + is equivalent to 00 in Switzerland
-        assertTokensInclude(phoneSearchCHAnalyzer, "0041583161010", Arrays.asList("41", "41583161010", "583161010"));
+        assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "0041583161010", Arrays.asList("0041583161010", "41583161010"));
     }
 
     public void testSearchLocalNumberWithCH() throws IOException {
         // when omitting the international prefix swiss numbers must start with '0'
-        assertTokensInclude(phoneSearchCHAnalyzer, "0583161010", Arrays.asList("41", "41583161010", "583161010"));
+        assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "0583161010", Arrays.asList("0583161010", "41583161010"));
     }
 
     /**
diff --git a/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml b/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml
index 0bd7d2c371bfc..1c51bfa3c5347 100644
--- a/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml
+++ b/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml
@@ -32,9 +32,37 @@
         index:  test
         id:     1
         body:   { "phone": "+41 58 316 10 10", "phone-ch": "058 316 10 10" }
+  - do:
+      index:
+        index:  test
+        id:     2
+        body:   { "phone": "+41 58 316 99 99", "phone-ch": "058 316 99 99" }
+  - do:
+      index:
+        index:  test
+        id:     3
+        # number not used in the examples below, just present to make sure that it's never matched
+        body:   { "phone": "+41 12 345 67 89", "phone-ch": "012 345 67 89" }
+  - do:
+      index:
+        index:  test
+        id:     4
+        # germany has a different phone number length, but for this test we ignore it and pretend they're the same
+        body:   { "phone": "+49 58 316 10 10", "phone-ch": "+49 58 316 10 10" }
+  - do:
+      index:
+        index:  test
+        id:     5
+        body:   { "phone": "+1-888-280-4331", "phone-ch": "+1-888-280-4331" }
+  - do:
+      index:
+        index:  test
+        id:     6
+        body:   { "phone": "tel:+441344840400", "phone-ch": "tel:+441344840400" }
   - do:
       indices.refresh: {}
 
+  # international format in document & search will always work
   - do:
       search:
         rest_total_hits_as_int: true
@@ -45,6 +73,7 @@
               "phone": "+41583161010"
   - match: { hits.total: 1 }
 
+  # correct national format & international format in search will always work
   - do:
       search:
         rest_total_hits_as_int: true
@@ -54,3 +83,113 @@
             match:
               "phone-ch": "+41583161010"
   - match: { hits.total: 1 }
+
+  # national format without country specified won't work
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "0583161010"
+  - match: { hits.total: 0 }
+
+  # correct national format with country specified in document & search will always work
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone-ch": "0583161010"
+  - match: { hits.total: 1 }
+
+  # search-as-you-type style query
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "+4158316"
+  - match: { hits.total: 2 }
+
+  # search-as-you-type style query
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone-ch": "058316"
+  - match: { hits.total: 2 }
+
+  # international format in document & search will always work
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "+1 888 280 4331"
+  - match: { hits.total: 1 }
+
+  # international format in document & search will always work
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone-ch": "+1 888 280 4331"
+  - match: { hits.total: 1 }
+
+  # national format in search won't work if no country is specified
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "888 280 4331"
+  - match: { hits.total: 0 }
+
+  # document & search have a tel: prefix
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "tel:+441344840400"
+  - match: { hits.total: 1 }
+
+  # only document has a tel: prefix
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "+441344840400"
+  - match: { hits.total: 1 }
+
+  # only search has a tel: prefix
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            match:
+              "phone": "tel:+1 888 280 4331"
+  - match: { hits.total: 1 }

From 8191de85856d291507d09a7fd425908843ed8675 Mon Sep 17 00:00:00 2001
From: Marc Handalian <marc.handalian@gmail.com>
Date: Fri, 10 Jan 2025 11:32:25 -0800
Subject: [PATCH 36/61] Limit RW separation to remote store enabled clusters
 and update recovery flow (#16760)

* Update search only replica recovery flow

This PR includes multiple changes to search replica recovery.
1. Change search-only replica copies to recover as empty store instead of PEER. This will run a store recovery that syncs segments from remote store directly and eliminate any primary communication.
2. Remove search replicas from the in-sync allocation ID set and update the routing table to exclude them from allAllocationIds. This ensures primaries aren't tracking or validating the presence of any search replica in the routing table.
3. Change search replica validation to require remote store. There are versions of the above changes that are still possible with primary-based node-to-node replication, but I don't think they are worth making at this time.

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* more coverage

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* add changelog entry

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Add assertions that search replicas are neither in the in-sync allocation ID set nor in the allAllocationIds set of the routing table
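
A hedged sketch of what these assertions check (the accessor names are assumed from the routing table API, not copied verbatim from the new tests):

    // Search replicas must not appear in the in-sync set or in allAllocationIds.
    IndexShardRoutingTable shardTable = clusterState.routingTable().index(INDEX_NAME).shard(0);
    Set<String> inSyncIds = clusterState.metadata().index(INDEX_NAME).inSyncAllocationIds(0);
    for (ShardRouting routing : shardTable.searchOnlyReplicas()) {
        assertFalse(inSyncIds.contains(routing.allocationId().getId()));
        assertFalse(shardTable.getAllAllocationIds().contains(routing.allocationId().getId()));
    }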

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* update async task to only run if the FF is enabled and we are a remote store cluster.

This check had previously only checked for segrep

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* clean up max shards logic

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* remove search replicas from check during renewPeerRecoveryRetentionLeases

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Revert "update async task to only run if the FF is enabled and we are a remote store cluster."

Reverting this; we already check for remote store earlier.

This reverts commit 48ca1a3050d0f24757c70ae23a9d9e185cb3bc40.

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Add more tests for failover case

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Update remote store restore logic and add a test ensuring we can restore only the writer copies when the index is red
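
For reference, the red-index restore path exercised by the new IT boils down to this call (taken from testStopPrimary_RestoreOnNewNode in this patch); only writer copies are restored, while the already-assigned search replica keeps serving reads:

    client().admin().cluster().restoreRemoteStore(
        new RestoreRemoteStoreRequest().indices(INDEX_NAME),
        PlainActionFuture.newFuture()
    );
    ensureGreen(INDEX_NAME);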

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Fix search replicas to honor node-level recovery limits

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Fix translog UUID mismatch on existing store recovery.

This commit also addresses PR feedback and adds recovery tests after node restart.
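
The fix relies on the invariant that a search-only copy holds no translog operations once its engine is reopened; the assertion added in IndexShard captures this (condensed from the patch below):

    assert routingEntry().isSearchOnly() == false || translogStats().estimatedNumberOfOperations() == 0
        : "Translog is expected to be empty but holds " + translogStats().estimatedNumberOfOperations() + " operations.";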

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Fix spotless

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

* Fix bug with remote restore and add more tests

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>

---------

Signed-off-by: Marc Handalian <marc.handalian@gmail.com>
---
 CHANGELOG.md                                  |   1 +
 .../SearchReplicaFilteringAllocationIT.java   |   3 +-
 ...SearchReplicaReplicationAndRecoveryIT.java | 325 ++++++++++++++++++
 .../SearchReplicaReplicationIT.java           | 134 --------
 .../replication/SearchReplicaRestoreIT.java   |  68 +---
 .../indices/settings/SearchOnlyReplicaIT.java |  32 +-
 .../metadata/MetadataCreateIndexService.java  |   9 +-
 .../MetadataUpdateSettingsService.java        |  15 +-
 .../cluster/routing/IndexRoutingTable.java    |  43 ++-
 .../routing/IndexShardRoutingTable.java       |  17 +
 .../cluster/routing/ShardRouting.java         |   6 +-
 .../allocation/IndexMetadataUpdater.java      |  13 +-
 .../decider/ThrottlingAllocationDecider.java  |  40 ++-
 .../index/seqno/ReplicationTracker.java       |   1 +
 .../opensearch/index/shard/IndexShard.java    |  21 +-
 .../index/shard/ReplicationGroup.java         |   6 +-
 .../opensearch/index/shard/StoreRecovery.java |  15 +-
 .../metadata/SearchOnlyReplicaTests.java      | 163 +++++----
 .../SearchReplicaAllocationDeciderTests.java  | 184 ++++++++++
 .../gateway/ClusterStateUpdatersTests.java    | 143 ++++++++
 .../index/shard/IndexShardTests.java          |  46 +++
 .../cluster/routing/TestShardRouting.java     |  22 ++
 22 files changed, 951 insertions(+), 356 deletions(-)
 create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java
 delete mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a57561da861ee..a46359520e9e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707))
 - Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909))
 - Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881))
+- Limit reader writer separation to remote store enabled clusters [#16760](https://github.com/opensearch-project/OpenSearch/pull/16760)
 
 ### Deprecated
 - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712))
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java
index 5f65d6647f26d..df2620b794686 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java
@@ -14,6 +14,7 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
 import java.util.List;
@@ -23,7 +24,7 @@
 import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-public class SearchReplicaFilteringAllocationIT extends OpenSearchIntegTestCase {
+public class SearchReplicaFilteringAllocationIT extends RemoteStoreBaseIntegTestCase {
 
     @Override
     protected Settings featureFlagSettings() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java
new file mode 100644
index 0000000000000..7d4dd62cdca61
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java
@@ -0,0 +1,325 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.replication;
+
+import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.opensearch.action.admin.cluster.node.stats.NodeStats;
+import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
+import org.opensearch.action.admin.indices.recovery.RecoveryRequest;
+import org.opensearch.action.admin.indices.recovery.RecoveryResponse;
+import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
+import org.opensearch.action.support.PlainActionFuture;
+import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.routing.RecoverySource;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.index.SegmentReplicationPerGroupStats;
+import org.opensearch.index.SegmentReplicationShardStats;
+import org.opensearch.indices.recovery.RecoveryState;
+import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.junit.After;
+
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS;
+import static org.opensearch.cluster.routing.RecoverySource.Type.EMPTY_STORE;
+import static org.opensearch.cluster.routing.RecoverySource.Type.EXISTING_STORE;
+import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class SearchReplicaReplicationAndRecoveryIT extends SegmentReplicationBaseIT {
+
+    private static final String REPOSITORY_NAME = "test-remote-store-repo";
+    protected Path absolutePath;
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        if (absolutePath == null) {
+            absolutePath = randomRepoPath().toAbsolutePath();
+        }
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath))
+            .build();
+    }
+
+    @After
+    public void teardown() {
+        clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
+
+    }
+
+    @Override
+    public Settings indexSettings() {
+        return Settings.builder()
+            .put(super.indexSettings())
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
+            .build();
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build();
+    }
+
+    public void testReplication() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primary = internalCluster().startDataOnlyNode();
+        createIndex(INDEX_NAME);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        final String replica = internalCluster().startDataOnlyNode();
+        ensureGreen(INDEX_NAME);
+
+        final int docCount = 10;
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+        waitForSearchableDocs(docCount, primary, replica);
+    }
+
+    public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final List<String> nodes = internalCluster().startDataOnlyNodes(2);
+        createIndex(
+            INDEX_NAME,
+            Settings.builder()
+                .put("number_of_shards", 1)
+                .put("number_of_replicas", 0)
+                .put("number_of_search_only_replicas", 1)
+                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+                .build()
+        );
+        ensureGreen(INDEX_NAME);
+
+        final int docCount = 5;
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+        waitForSearchableDocs(docCount, nodes);
+
+        SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
+            .indices()
+            .prepareSegmentReplicationStats(INDEX_NAME)
+            .setDetailed(true)
+            .execute()
+            .actionGet();
+
+        // Verify the number of indices
+        assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size());
+        // Verify total shards
+        assertEquals(2, segmentReplicationStatsResponse.getTotalShards());
+        // Verify the number of primary shards
+        assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size());
+
+        SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0);
+        Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats();
+        // Verify the number of replica stats
+        assertEquals(1, replicaStats.size());
+        for (SegmentReplicationShardStats replicaStat : replicaStats) {
+            assertNotNull(replicaStat.getCurrentReplicationState());
+        }
+    }
+
+    public void testSearchReplicaRecovery() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primary = internalCluster().startDataOnlyNode();
+        final String replica = internalCluster().startDataOnlyNode();
+
+        // ensure search replicas are only allocated to "replica" node.
+        client().admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", replica))
+            .execute()
+            .actionGet();
+
+        createIndex(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+        assertRecoverySourceType(replica, EMPTY_STORE);
+
+        final int docCount = 10;
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+        flush(INDEX_NAME);
+        waitForSearchableDocs(10, primary, replica);
+
+        // Node stats should show remote download stats as nonzero, use this as a precondition to compare
+        // post restart.
+        assertDownloadStats(replica, true);
+        NodesStatsResponse nodesStatsResponse;
+        NodeStats nodeStats;
+
+        internalCluster().restartNode(replica);
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(10, replica);
+
+        // assert existing store recovery
+        assertRecoverySourceType(replica, EXISTING_STORE);
+        assertDownloadStats(replica, false);
+    }
+
+    public void testRecoveryAfterDocsIndexed() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primary = internalCluster().startDataOnlyNode();
+        createIndex(INDEX_NAME);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        final int docCount = 10;
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+
+        final String replica = internalCluster().startDataOnlyNode();
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(10, replica);
+
+        assertRecoverySourceType(replica, EMPTY_STORE);
+        // replica should have downloaded from remote
+        assertDownloadStats(replica, true);
+
+        client().admin()
+            .indices()
+            .prepareUpdateSettings(INDEX_NAME)
+            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0))
+            .get();
+
+        ensureGreen(INDEX_NAME);
+
+        client().admin()
+            .indices()
+            .prepareUpdateSettings(INDEX_NAME)
+            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1))
+            .get();
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(10, replica);
+
+        internalCluster().restartNode(replica);
+
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(10, replica);
+        assertRecoverySourceType(replica, EXISTING_STORE);
+        assertDownloadStats(replica, false);
+    }
+
+    private static void assertRecoverySourceType(String replica, RecoverySource.Type recoveryType) throws InterruptedException,
+        ExecutionException {
+        RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(INDEX_NAME)).get();
+        for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get(INDEX_NAME)) {
+            if (recoveryState.getPrimary() == false) {
+                assertEquals("All SR should be of expected recovery type", recoveryType, recoveryState.getRecoverySource().getType());
+                assertEquals("All SR should be on the specified node", replica, recoveryState.getTargetNode().getName());
+            }
+        }
+    }
+
+    private static void assertDownloadStats(String replica, boolean expectBytesDownloaded) throws InterruptedException, ExecutionException {
+        NodesStatsResponse nodesStatsResponse = client().admin().cluster().nodesStats(new NodesStatsRequest(replica)).get();
+        assertEquals(1, nodesStatsResponse.getNodes().size());
+        NodeStats nodeStats = nodesStatsResponse.getNodes().get(0);
+        assertEquals(replica, nodeStats.getNode().getName());
+        if (expectBytesDownloaded) {
+            assertTrue(nodeStats.getIndices().getSegments().getRemoteSegmentStats().getDownloadBytesStarted() > 0);
+        } else {
+            assertEquals(0, nodeStats.getIndices().getSegments().getRemoteSegmentStats().getDownloadBytesStarted());
+        }
+    }
+
+    public void testStopPrimary_RestoreOnNewNode() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primary = internalCluster().startDataOnlyNode();
+        createIndex(INDEX_NAME);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        final int docCount = 10;
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+        assertDocCounts(docCount, primary);
+
+        final String replica = internalCluster().startDataOnlyNode();
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(docCount, replica);
+        // stop the primary
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary));
+
+        assertBusy(() -> {
+            ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get();
+            assertEquals(ClusterHealthStatus.RED, clusterHealthResponse.getStatus());
+        });
+        assertDocCounts(docCount, replica);
+
+        String restoredPrimary = internalCluster().startDataOnlyNode();
+
+        client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture());
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(docCount, replica, restoredPrimary);
+
+        for (int i = docCount; i < docCount * 2; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+        assertBusy(() -> assertDocCounts(20, replica, restoredPrimary));
+    }
+
+    public void testFailoverToNewPrimaryWithPollingReplication() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primary = internalCluster().startDataOnlyNode();
+        createIndex(INDEX_NAME);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        final int docCount = 10;
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+
+        final String replica = internalCluster().startDataOnlyNode();
+        ensureGreen(INDEX_NAME);
+        assertDocCounts(10, replica);
+
+        client().admin()
+            .indices()
+            .prepareUpdateSettings(INDEX_NAME)
+            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1))
+            .get();
+        final String writer_replica = internalCluster().startDataOnlyNode();
+        ensureGreen(INDEX_NAME);
+
+        // stop the primary
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary));
+
+        assertBusy(() -> {
+            ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get();
+            assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus());
+        });
+        ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get();
+        assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus());
+        assertDocCounts(10, replica);
+
+        for (int i = docCount; i < docCount * 2; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        refresh(INDEX_NAME);
+        assertBusy(() -> assertDocCounts(20, replica, writer_replica));
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java
deleted file mode 100644
index f660695af9965..0000000000000
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.indices.replication;
-
-import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
-import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
-import org.opensearch.index.SegmentReplicationPerGroupStats;
-import org.opensearch.index.SegmentReplicationShardStats;
-import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.test.OpenSearchIntegTestCase;
-import org.junit.After;
-import org.junit.Before;
-
-import java.nio.file.Path;
-import java.util.List;
-import java.util.Set;
-
-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-public class SearchReplicaReplicationIT extends SegmentReplicationBaseIT {
-
-    private static final String REPOSITORY_NAME = "test-remote-store-repo";
-    protected Path absolutePath;
-
-    private Boolean useRemoteStore;
-
-    @Before
-    public void randomizeRemoteStoreEnabled() {
-        useRemoteStore = randomBoolean();
-    }
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        if (useRemoteStore) {
-            if (absolutePath == null) {
-                absolutePath = randomRepoPath().toAbsolutePath();
-            }
-            return Settings.builder()
-                .put(super.nodeSettings(nodeOrdinal))
-                .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath))
-                .build();
-        }
-        return super.nodeSettings(nodeOrdinal);
-    }
-
-    @After
-    public void teardown() {
-        if (useRemoteStore) {
-            clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
-        }
-    }
-
-    @Override
-    public Settings indexSettings() {
-        return Settings.builder()
-            .put(super.indexSettings())
-            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-            .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-            .build();
-    }
-
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build();
-    }
-
-    public void testReplication() throws Exception {
-        internalCluster().startClusterManagerOnlyNode();
-        final String primary = internalCluster().startDataOnlyNode();
-        createIndex(INDEX_NAME);
-        ensureYellowAndNoInitializingShards(INDEX_NAME);
-        final String replica = internalCluster().startDataOnlyNode();
-        ensureGreen(INDEX_NAME);
-
-        final int docCount = 10;
-        for (int i = 0; i < docCount; i++) {
-            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
-        }
-        refresh(INDEX_NAME);
-        waitForSearchableDocs(docCount, primary, replica);
-    }
-
-    public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception {
-        internalCluster().startClusterManagerOnlyNode();
-        final List<String> nodes = internalCluster().startDataOnlyNodes(2);
-        createIndex(
-            INDEX_NAME,
-            Settings.builder()
-                .put("number_of_shards", 1)
-                .put("number_of_replicas", 0)
-                .put("number_of_search_only_replicas", 1)
-                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
-                .build()
-        );
-        ensureGreen(INDEX_NAME);
-
-        final int docCount = 5;
-        for (int i = 0; i < docCount; i++) {
-            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
-        }
-        refresh(INDEX_NAME);
-        waitForSearchableDocs(docCount, nodes);
-
-        SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
-            .indices()
-            .prepareSegmentReplicationStats(INDEX_NAME)
-            .setDetailed(true)
-            .execute()
-            .actionGet();
-
-        // Verify the number of indices
-        assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size());
-        // Verify total shards
-        assertEquals(2, segmentReplicationStatsResponse.getTotalShards());
-        // Verify the number of primary shards
-        assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size());
-
-        SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0);
-        Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats();
-        // Verify the number of replica stats
-        assertEquals(1, replicaStats.size());
-        for (SegmentReplicationShardStats replicaStat : replicaStats) {
-            assertNotNull(replicaStat.getCurrentReplicationState());
-        }
-    }
-}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java
index 352332b962c92..e8d65e07c7dd9 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java
@@ -15,7 +15,7 @@
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.snapshots.AbstractSnapshotIntegTestCase;
+import org.opensearch.remotestore.RemoteSnapshotIT;
 import org.opensearch.snapshots.SnapshotRestoreException;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
@@ -26,7 +26,7 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-public class SearchReplicaRestoreIT extends AbstractSnapshotIntegTestCase {
+public class SearchReplicaRestoreIT extends RemoteSnapshotIT {
 
     private static final String INDEX_NAME = "test-idx-1";
     private static final String RESTORED_INDEX_NAME = INDEX_NAME + "-restored";
@@ -40,49 +40,6 @@ protected Settings featureFlagSettings() {
         return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build();
     }
 
-    public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnDocRepWithSearchReplica() throws Exception {
-        bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT);
-        createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME);
-
-        SnapshotRestoreException exception = expectThrows(
-            SnapshotRestoreException.class,
-            () -> restoreSnapshot(
-                REPOSITORY_NAME,
-                SNAPSHOT_NAME,
-                INDEX_NAME,
-                RESTORED_INDEX_NAME,
-                Settings.builder()
-                    .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
-                    .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-                    .build()
-            )
-        );
-        assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.DOCUMENT, ReplicationType.DOCUMENT)));
-    }
-
-    public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnSegRepWithSearchReplica() throws Exception {
-        bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT);
-        createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME);
-
-        restoreSnapshot(
-            REPOSITORY_NAME,
-            SNAPSHOT_NAME,
-            INDEX_NAME,
-            RESTORED_INDEX_NAME,
-            Settings.builder()
-                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
-                .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-                .build()
-        );
-        ensureYellowAndNoInitializingShards(RESTORED_INDEX_NAME);
-        internalCluster().startDataOnlyNode();
-        ensureGreen(RESTORED_INDEX_NAME);
-        assertEquals(1, getNumberOfSearchReplicas(RESTORED_INDEX_NAME));
-
-        SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get();
-        assertHitCount(resp, DOC_COUNT);
-    }
-
     public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnDocRepWithSearchReplica() throws Exception {
         bootstrapIndexWithOutSearchReplicas(ReplicationType.SEGMENT);
         createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME);
@@ -140,27 +97,6 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_Resto
         assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.SEGMENT, ReplicationType.DOCUMENT)));
     }
 
-    public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRepWithNoSearchReplica() throws Exception {
-        bootstrapIndexWithSearchReplicas();
-        createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME);
-
-        restoreSnapshot(
-            REPOSITORY_NAME,
-            SNAPSHOT_NAME,
-            INDEX_NAME,
-            RESTORED_INDEX_NAME,
-            Settings.builder()
-                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
-                .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0)
-                .build()
-        );
-        ensureGreen(RESTORED_INDEX_NAME);
-        assertEquals(0, getNumberOfSearchReplicas(RESTORED_INDEX_NAME));
-
-        SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get();
-        assertHitCount(resp, DOC_COUNT);
-    }
-
     private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType) throws InterruptedException {
         startCluster(2);
 
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java
index fa836e2cc5784..f524f4d1298c1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java
@@ -20,6 +20,7 @@
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
 import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
@@ -31,7 +32,7 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-public class SearchOnlyReplicaIT extends OpenSearchIntegTestCase {
+public class SearchOnlyReplicaIT extends RemoteStoreBaseIntegTestCase {
 
     private static final String TEST_INDEX = "test_index";
 
@@ -55,35 +56,6 @@ public Settings indexSettings() {
             .build();
     }
 
-    public void testCreateDocRepFails() {
-        Settings settings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build();
-
-        IllegalArgumentException illegalArgumentException = expectThrows(
-            IllegalArgumentException.class,
-            () -> createIndex(TEST_INDEX, settings)
-        );
-        assertEquals(expectedFailureMessage, illegalArgumentException.getMessage());
-    }
-
-    public void testUpdateDocRepFails() {
-        Settings settings = Settings.builder()
-            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-            .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
-            .build();
-        // create succeeds
-        createIndex(TEST_INDEX, settings);
-
-        // update fails
-        IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> {
-            client().admin()
-                .indices()
-                .prepareUpdateSettings(TEST_INDEX)
-                .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1))
-                .get();
-        });
-        assertEquals(expectedFailureMessage, illegalArgumentException.getMessage());
-    }
-
     public void testFailoverWithSearchReplica_WithWriterReplicas() throws IOException {
         int numSearchReplicas = 1;
         int numWriterReplicas = 1;
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index 232201d18ba13..b5b2b71f977fa 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -1096,14 +1096,9 @@ static Settings aggregateIndexSettings(
     private static void updateSearchOnlyReplicas(Settings requestSettings, Settings.Builder builder) {
         if (INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.exists(builder) && builder.get(SETTING_NUMBER_OF_SEARCH_REPLICAS) != null) {
             if (INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings) > 0
-                && ReplicationType.parseString(builder.get(INDEX_REPLICATION_TYPE_SETTING.getKey())).equals(ReplicationType.DOCUMENT)) {
+                && Boolean.parseBoolean(builder.get(SETTING_REMOTE_STORE_ENABLED)) == false) {
                 throw new IllegalArgumentException(
-                    "To set "
-                        + SETTING_NUMBER_OF_SEARCH_REPLICAS
-                        + ", "
-                        + INDEX_REPLICATION_TYPE_SETTING.getKey()
-                        + " must be set to "
-                        + ReplicationType.SEGMENT
+                    "To set " + SETTING_NUMBER_OF_SEARCH_REPLICAS + ", " + SETTING_REMOTE_STORE_ENABLED + " must be set to true"
                 );
             }
             builder.put(SETTING_NUMBER_OF_SEARCH_REPLICAS, INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings));
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java
index 8c350d6b9cef5..a35af0e607c31 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java
@@ -63,7 +63,6 @@
 import org.opensearch.index.IndexSettings;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.ShardLimitValidator;
-import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.threadpool.ThreadPool;
 
 import java.io.IOException;
@@ -77,8 +76,8 @@
 import java.util.Set;
 
 import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext;
-import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED;
 import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap;
 import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings;
 import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings;
@@ -538,14 +537,12 @@ public ClusterState execute(ClusterState currentState) {
     private void validateSearchReplicaCountSettings(Settings requestSettings, Index[] indices, ClusterState currentState) {
         final int updatedNumberOfSearchReplicas = IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings);
         if (updatedNumberOfSearchReplicas > 0) {
-            if (Arrays.stream(indices).allMatch(index -> currentState.metadata().isSegmentReplicationEnabled(index.getName())) == false) {
+            if (Arrays.stream(indices)
+                .allMatch(
+                    index -> currentState.metadata().index(index.getName()).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)
+                ) == false) {
                 throw new IllegalArgumentException(
-                    "To set "
-                        + SETTING_NUMBER_OF_SEARCH_REPLICAS
-                        + ", "
-                        + INDEX_REPLICATION_TYPE_SETTING.getKey()
-                        + " must be set to "
-                        + ReplicationType.SEGMENT
+                    "To set " + SETTING_NUMBER_OF_SEARCH_REPLICAS + ", " + SETTING_REMOTE_STORE_ENABLED + " must be set to true"
                 );
             }
         }
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java
index b4592659bb70f..08574dddc007c 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java
@@ -149,7 +149,10 @@ boolean validate(Metadata metadata) {
                     "Shard ["
                         + indexShardRoutingTable.shardId().id()
                         + "] routing table has wrong number of replicas, expected ["
+                        + "Replicas:  "
                         + indexMetadata.getNumberOfReplicas()
+                        + "Search Replicas: "
+                        + indexMetadata.getNumberOfSearchOnlyReplicas()
                         + "], got ["
                         + routingNumberOfReplicas
                         + "]"
@@ -514,15 +517,31 @@ public Builder initializeAsRemoteStoreRestore(
                             ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo)
                         );
                     }
+                    // if writers are red we do not want to re-recover search only shards if already assigned.
+                    for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) {
+                        if (shardRouting.unassigned()) {
+                            indexShardRoutingBuilder.addShard(
+                                ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo)
+                            );
+                        } else {
+                            indexShardRoutingBuilder.addShard(shardRouting);
+                        }
+                    }
                 } else {
                     // Primary is either active or initializing. Do not trigger restore.
                     indexShardRoutingBuilder.addShard(indexShardRoutingTable.primaryShard());
                     // Replica, if unassigned, trigger peer recovery else no action.
                     for (ShardRouting shardRouting : indexShardRoutingTable.replicaShards()) {
                         if (shardRouting.unassigned()) {
-                            indexShardRoutingBuilder.addShard(
-                                ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo)
-                            );
+                            if (shardRouting.isSearchOnly()) {
+                                indexShardRoutingBuilder.addShard(
+                                    ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo)
+                                );
+                            } else {
+                                indexShardRoutingBuilder.addShard(
+                                    ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo)
+                                );
+                            }
                         } else {
                             indexShardRoutingBuilder.addShard(shardRouting);
                         }
@@ -575,13 +594,7 @@ private Builder initializeAsRestore(
                 }
                 for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) {
                     indexShardRoutingBuilder.addShard(
-                        ShardRouting.newUnassigned(
-                            shardId,
-                            false,
-                            true,
-                            PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled
-                            unassignedInfo
-                        )
+                        ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo)
                     );
                 }
                 shards.put(shardNumber, indexShardRoutingBuilder.build());
@@ -624,13 +637,7 @@ private Builder initializeEmpty(IndexMetadata indexMetadata, UnassignedInfo unas
                 }
                 for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) {
                     indexShardRoutingBuilder.addShard(
-                        ShardRouting.newUnassigned(
-                            shardId,
-                            false,
-                            true,
-                            PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled
-                            unassignedInfo
-                        )
+                        ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo)
                     );
                 }
                 shards.put(shardNumber, indexShardRoutingBuilder.build());
@@ -665,7 +672,7 @@ public Builder addSearchReplica() {
                     shardId,
                     false,
                     true,
-                    PeerRecoverySource.INSTANCE, // TODO: Change to remote store if enabled
+                    EmptyStoreRecoverySource.INSTANCE,
                     new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)
                 );
                 shards.put(shardNumber, new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build());
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
index f25cb14f65eca..eb4177d7046ca 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
@@ -134,6 +134,23 @@ public class IndexShardRoutingTable extends AbstractDiffable<IndexShardRoutingTa
             if (shard.initializing()) {
                 allInitializingShards.add(shard);
             }
+            if (shard.isSearchOnly()) {
+                // mark search only shards as initializing or assigned, but do not add to
+                // the allAllocationId set. Cluster Manager will filter out search replica allocationIds in
+                // the in-sync set that is sent to primaries, but they are still included in the routing table.
+                // This ensures the primaries do not validate these ids exist in tracking nor are included
+                // in the unavailableInSyncShards set.
+                if (shard.relocating()) {
+                    allInitializingShards.add(shard.getTargetRelocatingShard());
+                    assignedShards.add(shard.getTargetRelocatingShard());
+                }
+                if (shard.assignedToNode()) {
+                    assignedShards.add(shard);
+                }
+                assert shard.allocationId() == null || allAllocationIds.contains(shard.allocationId().getId()) == false
+                    : "Search replicas should not be part of the allAllocationId set";
+                continue;
+            }
             if (shard.relocating()) {
                 // create the target initializing shard routing on the node the shard is relocating to
                 allInitializingShards.add(shard.getTargetRelocatingShard());
diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java
index ada35caa1e61e..bdc98061f2fa4 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java
@@ -115,7 +115,7 @@ protected ShardRouting(
         assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
         assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null)
             : "recovery source only available on unassigned or initializing shard but was " + state;
-        assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary
+        assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary || searchOnly
             : "replica shards always recover from primary";
         assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node "
             + this;
@@ -156,7 +156,7 @@ private ShardRouting initializeTargetRelocatingShard() {
                 primary,
                 searchOnly,
                 ShardRoutingState.INITIALIZING,
-                PeerRecoverySource.INSTANCE,
+                isSearchOnly() ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE,
                 unassignedInfo,
                 AllocationId.newTargetRelocation(allocationId),
                 expectedShardSize
@@ -440,7 +440,7 @@ public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) {
         assert state != ShardRoutingState.UNASSIGNED : this;
         final RecoverySource recoverySource;
         if (active()) {
-            if (primary()) {
+            if (primary() || isSearchOnly()) {
                 recoverySource = ExistingStoreRecoverySource.INSTANCE;
             } else {
                 recoverySource = PeerRecoverySource.INSTANCE;
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
index 113d5803c1d65..e673c1409a869 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
@@ -99,7 +99,11 @@ public void shardStarted(ShardRouting initializingShard, ShardRouting startedSha
                 + startedShard.allocationId().getId()
                 + "] have to have the same";
         Updates updates = changes(startedShard.shardId());
-        updates.addedAllocationIds.add(startedShard.allocationId().getId());
+        // if the started shard is an untracked replica, don't bother sending it as part of the
+        // in sync id set.
+        if (startedShard.isSearchOnly() == false) {
+            updates.addedAllocationIds.add(startedShard.allocationId().getId());
+        }
         if (startedShard.primary()
             // started shard has to have null recoverySource; have to pick up recoverySource from its initializing state
             && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) {
@@ -259,9 +263,9 @@ private IndexMetadata.Builder updateInSyncAllocations(
             // We use number_of_replicas + 1 (= possible active shard copies) to bound the inSyncAllocationIds set
             // Only trim the set of allocation ids when it grows, otherwise we might trim too eagerly when the number
             // of replicas was decreased while shards were unassigned.
-            int maxActiveShards = oldIndexMetadata.getNumberOfReplicas() + oldIndexMetadata.getNumberOfSearchOnlyReplicas() + 1; // +1 for
-                                                                                                                                 // the
-                                                                                                                                 // primary
+            int maxActiveShards = oldIndexMetadata.getNumberOfReplicas() + 1; // +1 for
+                                                                              // the
+                                                                              // primary
             IndexShardRoutingTable newShardRoutingTable = newRoutingTable.shardRoutingTable(shardId);
             assert newShardRoutingTable.assignedShards()
                 .stream()
@@ -273,6 +277,7 @@ private IndexMetadata.Builder updateInSyncAllocations(
                 List<ShardRouting> assignedShards = newShardRoutingTable.assignedShards()
                     .stream()
                     .filter(s -> s.isRelocationTarget() == false)
+                    .filter(s -> s.isSearchOnly() == false) // do not consider search only shards for in sync validation
                     .collect(Collectors.toList());
                 assert assignedShards.size() <= maxActiveShards : "cannot have more assigned shards "
                     + assignedShards
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index 4bde1e282fe78..32639bc3065da 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -191,7 +191,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
             }
         } else {
             // Peer recovery
-            assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER;
+            assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER
+                || shardRouting.isSearchOnly();
 
             if (shardRouting.unassignedReasonIndexCreated()) {
                 return allocateInitialShardCopies(shardRouting, node, allocation);
@@ -204,7 +205,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
     private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
         int currentInRecoveries = allocation.routingNodes().getInitialIncomingRecoveries(node.nodeId());
         assert shardRouting.unassignedReasonIndexCreated() && !shardRouting.primary();
-
         return allocateShardCopies(
             shardRouting,
             allocation,
@@ -212,7 +212,8 @@ private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNo
             replicasInitialRecoveries,
             this::getInitialPrimaryNodeOutgoingRecoveries,
             replicasInitialRecoveries,
-            true
+            true,
+            node
         );
     }
 
@@ -228,7 +229,8 @@ private Decision allocateNonInitialShardCopies(ShardRouting shardRouting, Routin
             concurrentIncomingRecoveries,
             this::getPrimaryNodeOutgoingRecoveries,
             concurrentOutgoingRecoveries,
-            false
+            false,
+            node
         );
     }
 
@@ -249,7 +251,8 @@ private Decision allocateShardCopies(
         int inRecoveriesLimit,
         BiFunction<ShardRouting, RoutingAllocation, Integer> primaryNodeOutRecoveriesFunc,
         int outRecoveriesLimit,
-        boolean isInitialShardCopies
+        boolean isInitialShardCopies,
+        RoutingNode candidateNode
     ) {
         // Allocating a shard to this node will increase the incoming recoveries
         if (currentInRecoveries >= inRecoveriesLimit) {
@@ -274,6 +277,16 @@ private Decision allocateShardCopies(
                 );
             }
         } else {
+            // if this is a search shard that recovers from remote store, ignore outgoing recovery limits.
+            if (shardRouting.isSearchOnly() && candidateNode.node().isRemoteStoreNode()) {
+                return allocation.decision(
+                    YES,
+                    NAME,
+                    "Remote based search replica below incoming recovery limit: [%d < %d]",
+                    currentInRecoveries,
+                    inRecoveriesLimit
+                );
+            }
             // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node
             ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId());
             if (primaryShard == null) {
@@ -319,6 +332,10 @@ private Decision allocateShardCopies(
         }
     }
 
+    private static boolean isRemoteStoreNode(ShardRouting shardRouting, RoutingAllocation allocation) {
+        return allocation.nodes().getNodes().get(shardRouting.currentNodeId()).isRemoteStoreNode();
+    }
+
     /**
      * The shard routing passed to {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} is not the initializing shard to this
      * node but:
@@ -357,9 +374,18 @@ private ShardRouting initializingShard(ShardRouting shardRouting, String current
     @Override
     public Decision canMoveAway(ShardRouting shardRouting, RoutingAllocation allocation) {
         int outgoingRecoveries = 0;
-        if (!shardRouting.primary() && !shardRouting.isSearchOnly()) {
+        if (!shardRouting.primary()) {
             ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId());
-            outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId());
+            if (primaryShard != null) {
+                outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId());
+            } else {
+                assert shardRouting.isSearchOnly();
+                // check if the moving away search replica is using remote store, if not
+                // throw an error as the primary it will use for recovery is not active.
+                if (isRemoteStoreNode(shardRouting, allocation) == false) {
+                    return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active");
+                }
+            }
         } else {
             outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(shardRouting.currentNodeId());
         }
diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
index 1e43827afeb47..57ade7fa10cd0 100644
--- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
+++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
@@ -633,6 +633,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() {
          */
         final boolean renewalNeeded = StreamSupport.stream(routingTable.spliterator(), false)
             .filter(ShardRouting::assignedToNode)
+            .filter(r -> r.isSearchOnly() == false)
             .anyMatch(shardRouting -> {
                 final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting));
                 if (retentionLease == null) {
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index f5de4dfb5a933..02f20504b07ba 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -2540,22 +2540,24 @@ public void openEngineAndRecoverFromTranslog(boolean syncFromRemote) throws IOEx
      */
     public void openEngineAndSkipTranslogRecovery() throws IOException {
         assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
-        recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
-        loadGlobalCheckpointToReplicationTracker();
-        innerOpenEngineAndTranslog(replicationTracker);
-        getEngine().translogManager().skipTranslogRecovery();
+        openEngineAndSkipTranslogRecovery(true);
     }
 
     public void openEngineAndSkipTranslogRecoveryFromSnapshot() throws IOException {
-        assert routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery ["
-            + routingEntry()
-            + "]";
+        assert routingEntry().isSearchOnly() || routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT
+            : "not a snapshot recovery [" + routingEntry() + "]";
         recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX);
         maybeCheckIndex();
         recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
+        openEngineAndSkipTranslogRecovery(routingEntry().isSearchOnly());
+    }
+
+    void openEngineAndSkipTranslogRecovery(boolean syncFromRemote) throws IOException {
         recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
         loadGlobalCheckpointToReplicationTracker();
-        innerOpenEngineAndTranslog(replicationTracker, false);
+        innerOpenEngineAndTranslog(replicationTracker, syncFromRemote);
+        assert routingEntry().isSearchOnly() == false || translogStats().estimatedNumberOfOperations() == 0
+            : "Translog is expected to be empty but holds " + translogStats().estimatedNumberOfOperations() + " operations.";
         getEngine().translogManager().skipTranslogRecovery();
     }
 
@@ -2905,7 +2907,8 @@ public void recoverFromLocalShards(
     public void recoverFromStore(ActionListener<Boolean> listener) {
         // we are the first primary, recover from the gateway
         // if it's a post-API allocation, the index should exist
-        assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
+        assert shardRouting.primary() || shardRouting.isSearchOnly()
+            : "recover from store only makes sense if the shard is a primary shard or an untracked search only replica";
         assert shardRouting.initializing() : "can only start recovery on initializing shard";
         StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
         storeRecovery.recoverFromStore(this, listener);
diff --git a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java
index ccfaf50da1c6b..b2db48737ee3f 100644
--- a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java
+++ b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java
@@ -67,15 +67,17 @@ public ReplicationGroup(
         this.inSyncAllocationIds = inSyncAllocationIds;
         this.trackedAllocationIds = trackedAllocationIds;
         this.version = version;
-
         this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getAllAllocationIds());
         this.replicationTargets = new ArrayList<>();
         this.skippedShards = new ArrayList<>();
         for (final ShardRouting shard : routingTable) {
-            // search only replicas never receive any replicated operations
             if (shard.unassigned() || shard.isSearchOnly()) {
                 assert shard.primary() == false : "primary shard should not be unassigned in a replication group: " + shard;
                 skippedShards.add(shard);
+                if (shard.isSearchOnly()) {
+                    assert shard.allocationId() == null || inSyncAllocationIds.contains(shard.allocationId().getId()) == false
+                        : "Search replicas should not be part of the in-sync allocation id set";
+                }
             } else {
                 if (trackedAllocationIds.contains(shard.allocationId().getId())) {
                     replicationTargets.add(shard);
diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
index 6933e4e161dd1..74d9cc4b4f6dd 100644
--- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
@@ -544,7 +544,7 @@ private boolean canRecover(IndexShard indexShard) {
             // got closed on us, just ignore this recovery
             return false;
         }
-        if (indexShard.routingEntry().primary() == false) {
+        if (indexShard.routingEntry().primary() == false && indexShard.routingEntry().isSearchOnly() == false) {
             throw new IndexShardRecoveryException(shardId, "Trying to recover when the shard is in backup state", null);
         }
         return true;
@@ -747,7 +747,17 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe
                 writeEmptyRetentionLeasesFile(indexShard);
                 indexShard.recoveryState().getIndex().setFileDetailsComplete();
             }
-            indexShard.openEngineAndRecoverFromTranslog();
+            if (indexShard.routingEntry().isSearchOnly() == false) {
+                indexShard.openEngineAndRecoverFromTranslog();
+            } else {
+                // Open the engine for pull-based replica copies that are not
+                // primary-eligible. This skips any checkpoint tracking and ensures
+                // the shards are synced with the remote store before opening.
+                //
+                // First, bootstrap a new history / translog so that the TranslogUUID matches the UUID from the latest commit.
+                bootstrapForSnapshot(indexShard, store);
+                indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot();
+            }
             if (indexShard.shouldSeedRemoteStore()) {
                 indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> {
                     logger.info("Attempting to seed Remote Store via local recovery for {}", indexShard.shardId());
@@ -878,6 +888,7 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO
         store.bootstrapNewHistory();
         final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
         final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
+
         final String translogUUID = Translog.createEmptyTranslog(
             indexShard.shardPath().resolveTranslog(),
             localCheckpoint,
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java
index 3d11193a07884..81055e01d915b 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java
@@ -19,32 +19,46 @@
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.ShardRoutingState;
+import org.opensearch.common.ValidationException;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.env.Environment;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
 import org.opensearch.indices.ShardLimitValidator;
 import org.opensearch.indices.cluster.ClusterStateChanges;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.junit.After;
 import org.junit.Before;
 
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
 
 public class SearchOnlyReplicaTests extends OpenSearchSingleNodeTestCase {
 
+    public static final String TEST_RS_REPO = "test-rs-repo";
+    public static final String INDEX_NAME = "test-index";
     private ThreadPool threadPool;
 
     @Before
@@ -70,7 +84,7 @@ protected Settings featureFlagSettings() {
     public void testCreateWithDefaultSearchReplicasSetting() {
         final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
         ClusterState state = createIndexWithSettings(cluster, Settings.builder().build());
-        IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index(INDEX_NAME).getShards().get(0);
         assertEquals(1, indexShardRoutingTable.replicaShards().size());
         assertEquals(0, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(1, indexShardRoutingTable.writerReplicas().size());
@@ -91,53 +105,50 @@ public void testSearchReplicasValidationWithDocumentReplication() {
             )
         );
         assertEquals(
-            "To set index.number_of_search_only_replicas, index.replication.type must be set to SEGMENT",
+            "To set index.number_of_search_only_replicas, index.remote_store.enabled must be set to true",
             exception.getCause().getMessage()
         );
     }
 
-    public void testUpdateSearchReplicaCount() {
-        final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
+    public void testUpdateSearchReplicaCount() throws ExecutionException, InterruptedException {
+        Settings settings = Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, 1)
+            .put(SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+            .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
+            .build();
+        createIndex(INDEX_NAME, settings);
 
-        ClusterState state = createIndexWithSettings(
-            cluster,
-            Settings.builder()
-                .put(SETTING_NUMBER_OF_SHARDS, 1)
-                .put(SETTING_NUMBER_OF_REPLICAS, 0)
-                .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
-                .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-                .build()
-        );
-        assertTrue(state.metadata().hasIndex("index"));
-        rerouteUntilActive(state, cluster);
-        IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        IndexShardRoutingTable indexShardRoutingTable = getIndexShardRoutingTable();
         assertEquals(1, indexShardRoutingTable.replicaShards().size());
         assertEquals(1, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(0, indexShardRoutingTable.writerReplicas().size());
 
         // add another replica
-        state = cluster.updateSettings(
-            state,
-            new UpdateSettingsRequest("index").settings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build())
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings(
+            Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build()
         );
-        rerouteUntilActive(state, cluster);
-        indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        client().admin().indices().updateSettings(updateSettingsRequest).get();
+        indexShardRoutingTable = getIndexShardRoutingTable();
         assertEquals(2, indexShardRoutingTable.replicaShards().size());
         assertEquals(2, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(0, indexShardRoutingTable.writerReplicas().size());
 
         // remove all replicas
-        state = cluster.updateSettings(
-            state,
-            new UpdateSettingsRequest("index").settings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0).build())
+        updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings(
+            Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0).build()
         );
-        rerouteUntilActive(state, cluster);
-        indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0);
+        client().admin().indices().updateSettings(updateSettingsRequest).get();
+        indexShardRoutingTable = getIndexShardRoutingTable();
         assertEquals(0, indexShardRoutingTable.replicaShards().size());
         assertEquals(0, indexShardRoutingTable.searchOnlyReplicas().size());
         assertEquals(0, indexShardRoutingTable.writerReplicas().size());
     }
 
+    private IndexShardRoutingTable getIndexShardRoutingTable() {
+        return client().admin().cluster().prepareState().get().getState().getRoutingTable().index(INDEX_NAME).getShards().get(0);
+    }
+
     private ClusterState createIndexWithSettings(ClusterStateChanges cluster, Settings settings) {
         List<DiscoveryNode> allNodes = new ArrayList<>();
         // node for primary/local
@@ -149,48 +160,32 @@ private ClusterState createIndexWithSettings(ClusterStateChanges cluster, Settin
         }
         ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
 
-        CreateIndexRequest request = new CreateIndexRequest("index", settings).waitForActiveShards(ActiveShardCount.NONE);
+        CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME, settings).waitForActiveShards(ActiveShardCount.NONE);
         state = cluster.createIndex(state, request);
         return state;
     }
 
     public void testUpdateSearchReplicasOverShardLimit() {
-        final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
-
-        List<DiscoveryNode> allNodes = new ArrayList<>();
-        // node for primary/local
-        DiscoveryNode localNode = createNode(Version.CURRENT, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE);
-        allNodes.add(localNode);
-
-        allNodes.add(createNode(Version.CURRENT, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE));
-
-        ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
+        Settings settings = Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, 1)
+            .put(SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+            .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0)
+            .build();
+        createIndex(INDEX_NAME, settings);
+        Integer maxShardPerNode = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getDefault(Settings.EMPTY);
 
-        CreateIndexRequest request = new CreateIndexRequest(
-            "index",
-            Settings.builder()
-                .put(SETTING_NUMBER_OF_SHARDS, 1)
-                .put(SETTING_NUMBER_OF_REPLICAS, 0)
-                .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
-                .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)
-                .build()
-        ).waitForActiveShards(ActiveShardCount.NONE);
-        state = cluster.createIndex(state, request);
-        assertTrue(state.metadata().hasIndex("index"));
-        rerouteUntilActive(state, cluster);
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings(
+            Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build()
+        );
 
         // add another replica
-        ClusterState finalState = state;
-        Integer maxShardPerNode = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getDefault(Settings.EMPTY);
-        expectThrows(
-            RuntimeException.class,
-            () -> cluster.updateSettings(
-                finalState,
-                new UpdateSettingsRequest("index").settings(
-                    Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build()
-                )
-            )
+        ExecutionException executionException = expectThrows(
+            ExecutionException.class,
+            () -> client().admin().indices().updateSettings(updateSettingsRequest).get()
         );
+        Throwable cause = executionException.getCause();
+        assertEquals(ValidationException.class, cause.getClass());
     }
 
     public void testUpdateSearchReplicasOnDocrepCluster() {
@@ -206,7 +201,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() {
         ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
 
         CreateIndexRequest request = new CreateIndexRequest(
-            "index",
+            INDEX_NAME,
             Settings.builder()
                 .put(SETTING_NUMBER_OF_SHARDS, 1)
                 .put(SETTING_NUMBER_OF_REPLICAS, 0)
@@ -214,7 +209,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() {
                 .build()
         ).waitForActiveShards(ActiveShardCount.NONE);
         state = cluster.createIndex(state, request);
-        assertTrue(state.metadata().hasIndex("index"));
+        assertTrue(state.metadata().hasIndex(INDEX_NAME));
         rerouteUntilActive(state, cluster);
 
         // add another replica
@@ -224,7 +219,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() {
             RuntimeException.class,
             () -> cluster.updateSettings(
                 finalState,
-                new UpdateSettingsRequest("index").settings(
+                new UpdateSettingsRequest(INDEX_NAME).settings(
                     Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build()
                 )
             )
@@ -232,11 +227,51 @@ public void testUpdateSearchReplicasOnDocrepCluster() {
 
     }
 
+    Path tempDir = createTempDir();
+    Path repo = tempDir.resolve("repo");
+
+    @Override
+    protected Settings nodeSettings() {
+        return Settings.builder()
+            .put(super.nodeSettings())
+            .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
+            .put(buildRemoteStoreNodeAttributes(TEST_RS_REPO, repo))
+            .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
+            .put(Environment.PATH_REPO_SETTING.getKey(), repo)
+            .build();
+    }
+
+    private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) {
+        String repoTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            repoName
+        );
+        String repoSettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            repoName
+        );
+
+        return Settings.builder()
+            .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false)
+            .build();
+    }
+
     private static void rerouteUntilActive(ClusterState state, ClusterStateChanges cluster) {
-        while (state.routingTable().index("index").shard(0).allShardsStarted() == false) {
+        while (state.routingTable().index(INDEX_NAME).shard(0).allShardsStarted() == false) {
             state = cluster.applyStartedShards(
                 state,
-                state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
+                state.routingTable().index(INDEX_NAME).shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
             );
             state = cluster.reroute(state, new ClusterRerouteRequest());
         }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java
index 8d4f4cdee26cc..9604e82fe4c88 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java
@@ -8,27 +8,44 @@
 
 package org.opensearch.cluster.routing.allocation.decider;
 
+import org.opensearch.Version;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.EmptyClusterInfoService;
 import org.opensearch.cluster.OpenSearchAllocationTestCase;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.routing.RecoverySource;
 import org.opensearch.cluster.routing.RoutingTable;
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.ShardRoutingHelper;
 import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.opensearch.cluster.routing.allocation.command.AllocationCommands;
+import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.snapshots.EmptySnapshotsInfoService;
 import org.opensearch.test.gateway.TestGatewayAllocator;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
+import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
+import static org.opensearch.cluster.routing.ShardRoutingState.STARTED;
 import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
 
 public class SearchReplicaAllocationDeciderTests extends OpenSearchAllocationTestCase {
 
@@ -130,4 +147,171 @@ public void testSearchReplicaRoutingDedicatedIncludes() {
         decision = (Decision.Single) filterAllocationDecider.canRemain(primary, state.getRoutingNodes().node("node1"), allocation);
         assertEquals(decision.toString(), Decision.Type.YES, decision.type());
     }
+
+    public void testSearchReplicaWithThrottlingDecider_PrimaryBasedReplication() {
+        TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
+        // throttle outgoing on primary
+        AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator);
+
+        Set<Setting<?>> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING);
+        Metadata metadata = Metadata.builder()
+            .put(
+                IndexMetadata.builder("test")
+                    .settings(settings(Version.CURRENT))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+                    .numberOfSearchReplicas(1)
+            )
+            .build();
+
+        ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newNode("node1"), metadata, gatewayAllocator);
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size());
+        assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0);
+        // start a third node, we will try and move the SR to this node
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        // remove the primary and reroute - this would throw an NPE for search replicas but *not* for regular replicas,
+        // because regular replicas are promoted to primary before the canMoveAway call.
+        clusterState = strategy.disassociateDeadNodes(
+            ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(),
+            true,
+            "test"
+        );
+
+        // attempt to move the replica
+        AllocationService.CommandsResult commandsResult = strategy.reroute(
+            clusterState,
+            new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")),
+            true,
+            false
+        );
+
+        assertEquals(commandsResult.explanations().explanations().size(), 1);
+        assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO);
+        boolean isCorrectNoDecision = false;
+        for (Decision decision : commandsResult.explanations().explanations().get(0).decisions().getDecisions()) {
+            if (decision.label().equals(ThrottlingAllocationDecider.NAME)) {
+                assertEquals("primary shard for this replica is not yet active", decision.getExplanation());
+                assertEquals(Decision.Type.NO, decision.type());
+                isCorrectNoDecision = true;
+            }
+        }
+        assertTrue(isCorrectNoDecision);
+    }
+
+    public void testSearchReplicaWithThrottlingDeciderWithoutPrimary_RemoteStoreEnabled() {
+        TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
+        AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator);
+        Set<Setting<?>> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING);
+        Metadata metadata = Metadata.builder()
+            .put(
+                IndexMetadata.builder("test")
+                    .settings(settings(Version.CURRENT))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+                    .numberOfSearchReplicas(1)
+            )
+            .build();
+
+        ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newRemoteNode("node1"), metadata, gatewayAllocator);
+
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        DiscoveryNode node2 = newRemoteNode("node2");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node2)).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
+        assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size());
+        assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0);
+        // start a third node, we will try and move the SR to this node
+        DiscoveryNode node3 = newRemoteNode("node3");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node3)).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        // remove the primary and reroute - this would throw an NPE for search replicas but *not* for regular replicas,
+        // because regular replicas are promoted to primary before the canMoveAway call.
+        clusterState = strategy.disassociateDeadNodes(
+            ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(),
+            true,
+            "test"
+        );
+
+        // attempt to move the replica
+        AllocationService.CommandsResult commandsResult = strategy.reroute(
+            clusterState,
+            new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")),
+            true,
+            false
+        );
+
+        assertEquals(commandsResult.explanations().explanations().size(), 1);
+        assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO);
+        boolean foundYesMessage = false;
+        for (Decision decision : commandsResult.explanations().explanations().get(0).decisions().getDecisions()) {
+            if (decision.label().equals(ThrottlingAllocationDecider.NAME)) {
+                assertEquals("Remote based search replica below incoming recovery limit: [0 < 2]", decision.getExplanation());
+                assertEquals(Decision.Type.YES, decision.type());
+                foundYesMessage = true;
+            }
+        }
+        assertTrue(foundYesMessage);
+    }
+
+    private ClusterState initializeClusterStateWithSingleIndexAndShard(
+        DiscoveryNode primaryNode,
+        Metadata metadata,
+        TestGatewayAllocator gatewayAllocator
+    ) {
+        Metadata.Builder metadataBuilder = new Metadata.Builder(metadata);
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+        IndexMetadata indexMetadata = metadata.index("test");
+        IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata);
+        initializePrimaryAndMarkInSync(indexMetadata.getIndex(), indexMetadataBuilder, gatewayAllocator, primaryNode);
+        routingTableBuilder.addAsRecovery(indexMetadata);
+        metadataBuilder.put(indexMetadata, false);
+        RoutingTable routingTable = routingTableBuilder.build();
+        return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+            .nodes(DiscoveryNodes.builder().add(primaryNode))
+            .metadata(metadataBuilder.build())
+            .routingTable(routingTable)
+            .build();
+    }
+
+    private void initializePrimaryAndMarkInSync(
+        Index index,
+        IndexMetadata.Builder indexMetadata,
+        TestGatewayAllocator gatewayAllocator,
+        DiscoveryNode primaryNode
+    ) {
+        final ShardRouting unassigned = ShardRouting.newUnassigned(
+            new ShardId(index, 0),
+            true,
+            RecoverySource.EmptyStoreRecoverySource.INSTANCE,
+            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test")
+        );
+        ShardRouting started = ShardRoutingHelper.moveToStarted(ShardRoutingHelper.initialize(unassigned, primaryNode.getId()));
+        indexMetadata.putInSyncAllocationIds(0, Collections.singleton(started.allocationId().getId()));
+        gatewayAllocator.addKnownAllocation(started);
+    }
+
+    private static DiscoveryNode newRemoteNode(String name) {
+        return newNode(
+            name,
+            name,
+            Map.of(
+                REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY,
+                "cluster-repo",
+                REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY,
+                "segment-repo",
+                REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY,
+                "translog-repo"
+            )
+        );
+    }
 }
diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java
index dd2fb51151a5b..d85ed10eeeae7 100644
--- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java
+++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java
@@ -44,6 +44,7 @@
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.RecoverySource;
 import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.UUIDs;
@@ -489,4 +490,146 @@ public void testHideStateIfNotRecovered() {
         assertFalse(hiddenState.blocks().hasIndexBlock(indexMetadata.getIndex().getName(), IndexMetadata.INDEX_READ_ONLY_BLOCK));
     }
 
+    public void testRemoteRestoreWithSearchOnlyShards() {
+        final int numOfShards = 10;
+        final int numAssignedSearchReplicas = 5;
+        final int numOfSearchReplicas = 1;
+
+        final IndexMetadata remoteMetadata = createIndexMetadata(
+            "test-remote",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards)
+                .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numOfSearchReplicas)
+                .build()
+        );
+        // create an initial routing table where search replicas exist for every shard and some are assigned;
+        // assigned replicas should be carried over as-is into the restored routing.
+        final Index index = remoteMetadata.getIndex();
+
+        Map<ShardId, IndexShardRoutingTable> routingTable = new HashMap<>();
+        for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) {
+            ShardId shardId = new ShardId(index, shardNumber);
+            final String nodeId = "node " + shardNumber;
+            IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(
+                new ShardId(remoteMetadata.getIndex(), shardId.id())
+            );
+            // add a search replica for the shard
+            ShardRouting searchReplicaRouting = ShardRouting.newUnassigned(
+                shardId,
+                false,
+                true,
+                RecoverySource.EmptyStoreRecoverySource.INSTANCE,
+                new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test")
+            );
+            if (shardNumber < numAssignedSearchReplicas) {
+                // first five shards add the SR as assigned
+                builder.addShard(searchReplicaRouting.initialize(nodeId, null, 0L));
+            } else {
+                builder.addShard(searchReplicaRouting);
+            }
+            routingTable.put(shardId, builder.build());
+        }
+        IndexRoutingTable.Builder routingTableAfterRestore = new IndexRoutingTable.Builder(remoteMetadata.getIndex())
+            .initializeAsRemoteStoreRestore(
+                remoteMetadata,
+                new RecoverySource.RemoteStoreRecoverySource(
+                    UUIDs.randomBase64UUID(),
+                    remoteMetadata.getCreationVersion(),
+                    new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID())
+                ),
+                routingTable,
+                true
+            );
+        for (IndexShardRoutingTable indexShardRoutingTable : routingTableAfterRestore.build()) {
+            assertEquals(numOfSearchReplicas, indexShardRoutingTable.searchOnlyReplicas().size());
+            for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) {
+                if (shardRouting.shardId().getId() < numAssignedSearchReplicas) {
+                    assertTrue(shardRouting.assignedToNode());
+                    assertTrue(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting));
+                } else {
+                    assertTrue(shardRouting.unassigned());
+                    assertFalse(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting));
+                }
+            }
+        }
+    }
+
+    private boolean containsSameRouting(IndexShardRoutingTable oldRoutingTable, ShardRouting shardRouting) {
+        return oldRoutingTable.searchOnlyReplicas().stream().anyMatch(r -> r.isSameAllocation(shardRouting));
+    }
+
+    public void testRemoteRestoreWithActivePrimaryAndSearchOnlyShards() {
+        final int numOfShards = 10;
+        final int numAssignedSearchReplicas = 5;
+        final int numOfSearchReplicas = 1;
+
+        final IndexMetadata remoteMetadata = createIndexMetadata(
+            "test-remote",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards)
+                .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numOfSearchReplicas)
+                .build()
+        );
+        // create an initial routing table where search replicas exist for every shard and some are assigned;
+        // assigned replicas should be carried over as-is into the restored routing.
+        final Index index = remoteMetadata.getIndex();
+
+        Map<ShardId, IndexShardRoutingTable> routingTable = new HashMap<>();
+        for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) {
+            ShardId shardId = new ShardId(index, shardNumber);
+            final String nodeId = "node " + shardNumber;
+            IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(
+                new ShardId(remoteMetadata.getIndex(), shardId.id())
+            );
+            // add the primary as assigned
+            ShardRouting primary = ShardRouting.newUnassigned(
+                shardId,
+                true,
+                RecoverySource.EmptyStoreRecoverySource.INSTANCE,
+                new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test")
+            );
+            builder.addShard(primary.initialize(nodeId + " Primary", null, 0L));
+
+            // add a search replica for the shard
+            ShardRouting searchReplicaRouting = ShardRouting.newUnassigned(
+                shardId,
+                false,
+                true,
+                RecoverySource.EmptyStoreRecoverySource.INSTANCE,
+                new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test")
+            );
+            if (shardNumber < numAssignedSearchReplicas) {
+                // first five shards add the SR as assigned
+                builder.addShard(searchReplicaRouting.initialize(nodeId, null, 0L));
+            } else {
+                builder.addShard(searchReplicaRouting);
+            }
+            routingTable.put(shardId, builder.build());
+        }
+        IndexRoutingTable.Builder routingTableAfterRestore = new IndexRoutingTable.Builder(remoteMetadata.getIndex())
+            .initializeAsRemoteStoreRestore(
+                remoteMetadata,
+                new RecoverySource.RemoteStoreRecoverySource(
+                    UUIDs.randomBase64UUID(),
+                    remoteMetadata.getCreationVersion(),
+                    new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID())
+                ),
+                routingTable,
+                false
+            );
+        for (IndexShardRoutingTable indexShardRoutingTable : routingTableAfterRestore.build()) {
+            assertEquals(numOfSearchReplicas, indexShardRoutingTable.searchOnlyReplicas().size());
+            for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) {
+                if (shardRouting.shardId().getId() < numAssignedSearchReplicas) {
+                    assertTrue(shardRouting.assignedToNode());
+                    assertTrue(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting));
+                } else {
+                    assertTrue(shardRouting.unassigned());
+                    assertFalse(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting));
+                }
+            }
+        }
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
index 96794a83ef762..535adfbff8dcc 100644
--- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
@@ -3011,6 +3011,52 @@ public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOExcep
         closeShards(target);
     }
 
+    public void testRestoreSearchOnlyShardFromStore() throws IOException {
+        // this test indexes docs on a primary, refreshes, then recovers a new Search Replica and asserts
+        // all docs are present
+        String remoteStorePath = createTempDir().toString();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+            .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test")
+            .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test")
+            .build();
+        IndexShard primary = newStartedShard(true, settings, new InternalEngineFactory());
+        indexDoc(primary, "_doc", "1");
+        indexDoc(primary, "_doc", "2");
+        primary.refresh("test");
+        assertDocs(primary, "1", "2");
+
+        ShardRouting searchReplicaShardRouting = TestShardRouting.newShardRouting(
+            primary.shardId,
+            randomAlphaOfLength(10),
+            false,
+            true,
+            ShardRoutingState.INITIALIZING,
+            RecoverySource.EmptyStoreRecoverySource.INSTANCE
+        );
+        IndexShard replica = newShard(searchReplicaShardRouting, settings, new NRTReplicationEngineFactory());
+        recoverShardFromStore(replica);
+        searchReplicaShardRouting = replica.routingEntry();
+        assertDocs(replica, "1", "2");
+        assertEquals(
+            primary.getLatestReplicationCheckpoint().getSegmentInfosVersion(),
+            replica.getLatestReplicationCheckpoint().getSegmentInfosVersion()
+        );
+
+        // move to unassigned while the replica is active, then reinit from existing store.
+        searchReplicaShardRouting = ShardRoutingHelper.moveToUnassigned(
+            searchReplicaShardRouting,
+            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so")
+        );
+        searchReplicaShardRouting = ShardRoutingHelper.initialize(searchReplicaShardRouting, replica.routingEntry().currentNodeId());
+        assertEquals(RecoverySource.ExistingStoreRecoverySource.INSTANCE, searchReplicaShardRouting.recoverySource());
+        replica = reinitShard(replica, searchReplicaShardRouting);
+        recoverShardFromStore(replica);
+        assertDocs(replica, "1", "2");
+        closeShards(primary, replica);
+    }
+
     public void testReaderWrapperIsUsed() throws IOException {
         IndexShard shard = newStartedShard(true);
         indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java
index 9a000a4eeda72..a6af658be2ca1 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java
@@ -342,4 +342,26 @@ public static ShardRouting newShardRouting(
             -1
         );
     }
+
+    public static ShardRouting newShardRouting(
+        ShardId shardId,
+        String currentNodeId,
+        boolean primary,
+        boolean searchOnly,
+        ShardRoutingState state,
+        RecoverySource recoverySource
+    ) {
+        return new ShardRouting(
+            shardId,
+            currentNodeId,
+            null,
+            primary,
+            searchOnly,
+            state,
+            recoverySource,
+            buildUnassignedInfo(state),
+            buildAllocationId(state),
+            -1
+        );
+    }
 }

From 5afb92fc06b5dc68110ccddd49b3ef1468734963 Mon Sep 17 00:00:00 2001
From: "Samuel.G" <1148690954@qq.com>
Date: Sat, 11 Jan 2025 06:22:30 +0900
Subject: [PATCH 37/61] Fix case insensitive and escaped query on wildcard
 (#16827)

* fix case insensitive and escaped query on wildcard

Signed-off-by: gesong.samuel <gesong.samuel@bytedance.com>

* add changelog

Signed-off-by: gesong.samuel <gesong.samuel@bytedance.com>

---------

Signed-off-by: gesong.samuel <gesong.samuel@bytedance.com>
Signed-off-by: Michael Froh <froh@amazon.com>
Co-authored-by: gesong.samuel <gesong.samuel@bytedance.com>
Co-authored-by: Michael Froh <froh@amazon.com>
---
 CHANGELOG.md                                  |   1 +
 .../search/270_wildcard_fieldtype_queries.yml | 127 +++++++++++++++++-
 .../index/mapper/WildcardFieldMapper.java     | 116 ++++++++++++----
 3 files changed, 213 insertions(+), 31 deletions(-)
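
A minimal standalone sketch of the unescaping rule this change relies on (illustrative class
and method names, not the actual WildcardFieldMapper code): a backslash only consumes the
following character when that character is special for the active syntax, so "\*" resolves to
a literal asterisk while "\\\*" resolves to a backslash followed by an asterisk.

    import java.util.Set;

    public class WildcardEscapeSketch {
        // Characters that are special in wildcard syntax (regexp syntax uses a larger set).
        private static final Set<Character> WILDCARD_SPECIAL = Set.of('?', '*', '\\');

        // Drop escaping backslashes for wildcard syntax; backslashes before
        // non-special characters are kept verbatim.
        static String unescapeWildcard(String str) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < str.length(); i++) {
                if (str.charAt(i) == '\\' && i + 1 < str.length() && WILDCARD_SPECIAL.contains(str.charAt(i + 1))) {
                    i++; // skip the escaping backslash, keep the escaped character
                }
                sb.append(str.charAt(i));
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            System.out.println(unescapeWildcard("\\*"));     // *   (literal asterisk, the value of doc 8 in the YAML tests below)
            System.out.println(unescapeWildcard("\\\\\\*")); // \*  (backslash then asterisk, the value of doc 9)
            System.out.println(unescapeWildcard("a\\b"));    // a\b ('b' is not special, so the backslash is kept)
        }
    }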

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a46359520e9e1..20e6c03d5a9d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bound the size of cache in deprecation logger ([16702](https://github.com/opensearch-project/OpenSearch/issues/16702))
 - Ensure consistency of system flag on IndexMetadata after diff is applied ([#16644](https://github.com/opensearch-project/OpenSearch/pull/16644))
 - Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763))
+- Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827))
 - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606))
 - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335))
 - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
index d92538824232d..a85399feefd25 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
@@ -62,6 +62,19 @@ setup:
         id: 7
         body:
           my_field: "ABCD"
+  - do:
+      index:
+        index: test
+        id: 8
+        body:
+          my_field: "*"
+
+  - do:
+      index:
+        index: test
+        id: 9
+        body:
+          my_field: "\\*"
   - do:
       indices.refresh: {}
 
@@ -223,7 +236,7 @@ setup:
             wildcard:
               my_field:
                 value: "*"
-  - match: { hits.total.value: 6 }
+  - match: { hits.total.value: 8 }
 ---
 "regexp match-all works":
   - do:
@@ -234,7 +247,7 @@ setup:
             regexp:
               my_field:
                 value: ".*"
-  - match: { hits.total.value: 6 }
+  - match: { hits.total.value: 8 }
 ---
 "terms query on wildcard field matches":
   - do:
@@ -270,3 +283,113 @@ setup:
   - match: { hits.total.value: 2 }
   - match: { hits.hits.0._id: "5" }
   - match: { hits.hits.1._id: "7" }
+---
+"case insensitive regexp query on wildcard field":
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            regexp:
+              my_field:
+                value: "AbCd"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "5" }
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            regexp:
+              my_field:
+                value: "AbCd"
+                case_insensitive: true
+  - match: { hits.total.value: 2 }
+  - match: { hits.hits.0._id: "5" }
+  - match: { hits.hits.1._id: "7" }
+---
+"wildcard query works on values contains escaped characters":
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            wildcard:
+              my_field:
+                value: "\\*"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "8" }
+
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            wildcard:
+              my_field:
+                value: "\\\\\\*"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "9" }
+---
+"regexp query works on values contains escaped characters":
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            regexp:
+              my_field:
+                value: "\\*"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "8" }
+
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            regexp:
+              my_field:
+                value: "\\\\\\*"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "9"}
+---
+"term query contains escaped characters":
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            term:
+              my_field: "\\*"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "9" }
+
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            term:
+              my_field: "*"
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "8"}
+---
+"terms query contains escaped characters":
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            terms: { my_field: ["*"] }
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "8" }
+
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            terms: { my_field: [ "\\*" ] }
+  - match: { hits.total.value: 1 }
+  - match: { hits.hits.0._id: "9" }
diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java
index e43e3bda692e7..7342c6f9f23bd 100644
--- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java
@@ -327,6 +327,25 @@ public boolean incrementToken() throws IOException {
      * Implements the various query types over wildcard fields.
      */
     public static final class WildcardFieldType extends StringFieldType {
+        private static final Set<Character> WILDCARD_SPECIAL = Set.of('?', '*', '\\');
+        private static final Set<Character> REGEXP_SPECIAL = Set.of(
+            '.',
+            '^',
+            '$',
+            '*',
+            '+',
+            '?',
+            '(',
+            ')',
+            '[',
+            ']',
+            '{',
+            '}',
+            '|',
+            '/',
+            '\\'
+        );
+
         private final int ignoreAbove;
         private final String nullValue;
 
@@ -438,7 +457,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo
                     if (caseInsensitive) {
                         s = s.toLowerCase(Locale.ROOT);
                     }
-                    return s.equals(finalValue);
+                    return s.equals(performEscape(finalValue, false));
                 };
             } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.ALL) {
                 return existsQuery(context);
@@ -454,7 +473,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo
                 };
             }
 
-            Set<String> requiredNGrams = getRequiredNGrams(finalValue);
+            Set<String> requiredNGrams = getRequiredNGrams(finalValue, false);
             Query approximation;
             if (requiredNGrams.isEmpty()) {
                 // This only happens when all characters are wildcard characters (* or ?),
@@ -471,7 +490,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo
         }
 
         // Package-private for testing
-        static Set<String> getRequiredNGrams(String value) {
+        static Set<String> getRequiredNGrams(String value, boolean regexpMode) {
             Set<String> terms = new HashSet<>();
 
             if (value.isEmpty()) {
@@ -484,7 +503,7 @@ static Set<String> getRequiredNGrams(String value) {
             if (!value.startsWith("?") && !value.startsWith("*")) {
                 // Can add prefix term
                 rawSequence = getNonWildcardSequence(value, 0);
-                currentSequence = performEscape(rawSequence);
+                currentSequence = performEscape(rawSequence, regexpMode);
                 if (currentSequence.length() == 1) {
                     terms.add(new String(new char[] { 0, currentSequence.charAt(0) }));
                 } else {
@@ -496,7 +515,7 @@ static Set<String> getRequiredNGrams(String value) {
             }
             while (pos < value.length()) {
                 boolean isEndOfValue = pos + rawSequence.length() == value.length();
-                currentSequence = performEscape(rawSequence);
+                currentSequence = performEscape(rawSequence, regexpMode);
                 if (!currentSequence.isEmpty() && currentSequence.length() < 3 && !isEndOfValue && pos > 0) {
                     // If this is a prefix or suffix of length < 3, then we already have a longer token including the anchor.
                     terms.add(currentSequence);
@@ -542,19 +561,42 @@ private static int findNonWildcardSequence(String value, int startFrom) {
             return value.length();
         }
 
-        private static String performEscape(String str) {
-            StringBuilder sb = new StringBuilder();
+        /**
+         * Reverses the escaping performed by {@link #quoteWildcard(String)}.
+         * @param str target string
+         * @param regexpMode whether unescaping targets regexp special characters instead of wildcard ones
+         * @return the string with escape characters removed
+         */
+        private static String performEscape(String str, boolean regexpMode) {
+            final StringBuilder sb = new StringBuilder();
+            final Set<Character> targetChars = regexpMode ? REGEXP_SPECIAL : WILDCARD_SPECIAL;
+
             for (int i = 0; i < str.length(); i++) {
                 if (str.charAt(i) == '\\' && (i + 1) < str.length()) {
                     char c = str.charAt(i + 1);
-                    if (c == '*' || c == '?') {
+                    if (targetChars.contains(c)) {
                         i++;
                     }
                 }
                 sb.append(str.charAt(i));
             }
-            assert !sb.toString().contains("\\*");
-            assert !sb.toString().contains("\\?");
+            return sb.toString();
+        }
+
+        /**
+         * Escapes wildcard special characters manually instead of calling String.replace, for better performance.
+         * Used only for term queries.
+         * @param str target string
+         * @return escaped string
+         */
+        private static String quoteWildcard(String str) {
+            StringBuilder sb = new StringBuilder();
+            for (int i = 0; i < str.length(); i++) {
+                if (WILDCARD_SPECIAL.contains(str.charAt(i))) {
+                    sb.append('\\');
+                }
+                sb.append(str.charAt(i));
+            }
             return sb.toString();
         }
 
@@ -568,11 +610,10 @@ public Query regexpQuery(
             QueryShardContext context
         ) {
             NamedAnalyzer normalizer = normalizer();
-            if (normalizer != null) {
-                value = normalizer.normalize(name(), value).utf8ToString();
-            }
+            final String finalValue = normalizer != null ? normalizer.normalize(name(), value).utf8ToString() : value;
+            final boolean caseInsensitive = matchFlags == RegExp.ASCII_CASE_INSENSITIVE;
 
-            RegExp regExp = new RegExp(value, syntaxFlags, matchFlags);
+            RegExp regExp = new RegExp(finalValue, syntaxFlags, matchFlags);
             Automaton automaton = regExp.toAutomaton(maxDeterminizedStates);
             CompiledAutomaton compiledAutomaton = new CompiledAutomaton(automaton);
 
@@ -581,6 +622,14 @@ public Query regexpQuery(
                 return existsQuery(context);
             } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.NONE) {
                 return new MatchNoDocsQuery("Regular expression matches nothing");
+            } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.SINGLE) {
+                // when type equals SINGLE, #compiledAutomaton.runAutomaton is null
+                regexpPredicate = s -> {
+                    if (caseInsensitive) {
+                        s = s.toLowerCase(Locale.ROOT);
+                    }
+                    return s.equals(performEscape(finalValue, true));
+                };
             } else {
                 regexpPredicate = s -> {
                     BytesRef valueBytes = BytesRefs.toBytesRef(s);
@@ -588,11 +637,11 @@ public Query regexpQuery(
                 };
             }
 
-            Query approximation = regexpToQuery(name(), regExp);
+            Query approximation = regexpToQuery(name(), regExp, caseInsensitive);
             if (approximation instanceof MatchAllDocsQuery) {
                 approximation = existsQuery(context);
             }
-            return new WildcardMatchingQuery(name(), approximation, regexpPredicate, "/" + value + "/", context, this);
+            return new WildcardMatchingQuery(name(), approximation, regexpPredicate, "/" + finalValue + "/", context, this);
         }
 
         /**
@@ -602,16 +651,16 @@ public Query regexpQuery(
          * @param regExp a parsed node in the {@link RegExp} tree
          * @return a query that matches on the known required parts of the given regular expression
          */
-        private static Query regexpToQuery(String fieldName, RegExp regExp) {
+        private static Query regexpToQuery(String fieldName, RegExp regExp, boolean caseInsensitive) {
             BooleanQuery query;
             if (Objects.requireNonNull(regExp.kind) == RegExp.Kind.REGEXP_UNION) {
                 List<Query> clauses = new ArrayList<>();
                 while (regExp.exp1.kind == RegExp.Kind.REGEXP_UNION) {
-                    clauses.add(regexpToQuery(fieldName, regExp.exp2));
+                    clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive));
                     regExp = regExp.exp1;
                 }
-                clauses.add(regexpToQuery(fieldName, regExp.exp2));
-                clauses.add(regexpToQuery(fieldName, regExp.exp1));
+                clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive));
+                clauses.add(regexpToQuery(fieldName, regExp.exp1, caseInsensitive));
                 BooleanQuery.Builder builder = new BooleanQuery.Builder();
                 for (int i = clauses.size() - 1; i >= 0; i--) {
                     Query clause = clauses.get(i);
@@ -623,18 +672,24 @@ private static Query regexpToQuery(String fieldName, RegExp regExp) {
                 query = builder.build();
             } else if (regExp.kind == RegExp.Kind.REGEXP_STRING) {
                 BooleanQuery.Builder builder = new BooleanQuery.Builder();
-                for (String string : getRequiredNGrams("*" + regExp.s + "*")) {
-                    builder.add(new TermQuery(new Term(fieldName, string)), BooleanClause.Occur.FILTER);
+                for (String string : getRequiredNGrams("*" + regExp.s + "*", true)) {
+                    final Query subQuery;
+                    if (caseInsensitive) {
+                        subQuery = AutomatonQueries.caseInsensitiveTermQuery(new Term(fieldName, string));
+                    } else {
+                        subQuery = new TermQuery(new Term(fieldName, string));
+                    }
+                    builder.add(subQuery, BooleanClause.Occur.FILTER);
                 }
                 query = builder.build();
             } else if (regExp.kind == RegExp.Kind.REGEXP_CONCATENATION) {
                 List<Query> clauses = new ArrayList<>();
                 while (regExp.exp1.kind == RegExp.Kind.REGEXP_CONCATENATION) {
-                    clauses.add(regexpToQuery(fieldName, regExp.exp2));
+                    clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive));
                     regExp = regExp.exp1;
                 }
-                clauses.add(regexpToQuery(fieldName, regExp.exp2));
-                clauses.add(regexpToQuery(fieldName, regExp.exp1));
+                clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive));
+                clauses.add(regexpToQuery(fieldName, regExp.exp1, caseInsensitive));
                 BooleanQuery.Builder builder = new BooleanQuery.Builder();
                 for (int i = clauses.size() - 1; i >= 0; i--) {
                     Query clause = clauses.get(i);
@@ -645,7 +700,7 @@ private static Query regexpToQuery(String fieldName, RegExp regExp) {
                 query = builder.build();
             } else if ((regExp.kind == RegExp.Kind.REGEXP_REPEAT_MIN || regExp.kind == RegExp.Kind.REGEXP_REPEAT_MINMAX)
                 && regExp.min > 0) {
-                    return regexpToQuery(fieldName, regExp.exp1);
+                    return regexpToQuery(fieldName, regExp.exp1, caseInsensitive);
                 } else {
                     return new MatchAllDocsQuery();
                 }
@@ -664,12 +719,12 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower
 
         @Override
         public Query termQueryCaseInsensitive(Object value, QueryShardContext context) {
-            return wildcardQuery(BytesRefs.toString(value), MultiTermQuery.CONSTANT_SCORE_REWRITE, true, context);
+            return wildcardQuery(quoteWildcard(BytesRefs.toString(value)), MultiTermQuery.CONSTANT_SCORE_REWRITE, true, context);
         }
 
         @Override
         public Query termQuery(Object value, QueryShardContext context) {
-            return wildcardQuery(BytesRefs.toString(value), MultiTermQuery.CONSTANT_SCORE_REWRITE, false, context);
+            return wildcardQuery(quoteWildcard(BytesRefs.toString(value)), MultiTermQuery.CONSTANT_SCORE_REWRITE, false, context);
         }
 
         @Override
@@ -679,7 +734,10 @@ public Query termsQuery(List<?> values, QueryShardContext context) {
             StringBuilder pattern = new StringBuilder();
             for (Object value : values) {
                 String stringVal = BytesRefs.toString(value);
-                builder.add(matchAllTermsQuery(name(), getRequiredNGrams(stringVal), false), BooleanClause.Occur.SHOULD);
+                builder.add(
+                    matchAllTermsQuery(name(), getRequiredNGrams(quoteWildcard(stringVal), false), false),
+                    BooleanClause.Occur.SHOULD
+                );
                 expectedValues.add(stringVal);
                 if (pattern.length() > 0) {
                     pattern.append('|');
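
For readers skimming the patch, the term-query escaping above boils down to a quoteWildcard/performEscape round trip: termQuery now escapes the raw value, and the verification predicate unescapes it again before comparing. The following standalone sketch reproduces just that pair from the diff; the class name EscapeSketch and the main method are illustrative scaffolding, not part of the OpenSearch source.

import java.util.Set;

// Standalone sketch of the escaping round trip used by the wildcard field's term query.
public class EscapeSketch {
    private static final Set<Character> WILDCARD_SPECIAL = Set.of('?', '*', '\\');

    // Escape every wildcard special character so the value is matched literally.
    static String quoteWildcard(String str) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < str.length(); i++) {
            if (WILDCARD_SPECIAL.contains(str.charAt(i))) {
                sb.append('\\');
            }
            sb.append(str.charAt(i));
        }
        return sb.toString();
    }

    // Drop the escaping again so the predicate can compare against the raw stored value.
    static String performEscape(String str) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < str.length(); i++) {
            if (str.charAt(i) == '\\' && i + 1 < str.length() && WILDCARD_SPECIAL.contains(str.charAt(i + 1))) {
                i++; // skip the backslash, keep the escaped character
            }
            sb.append(str.charAt(i));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        String raw = "*";                          // the literal asterisk indexed by doc 8 in the YAML test
        String quoted = quoteWildcard(raw);        // "\*", which termQuery now hands to wildcardQuery
        System.out.println(quoted);                // \*
        System.out.println(performEscape(quoted)); // *
    }
}

With this round trip in place, a stored literal "*" is matched by a plain term query without being interpreted as a wildcard, which is exactly what the YAML cases for documents 8 and 9 exercise.
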

From 6dc63c5281f631c8a1921bbd60f2bf975a82e0a5 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Fri, 10 Jan 2025 17:37:40 -0500
Subject: [PATCH 38/61] Bump opentelemetry from 1.41.0 to 1.46.0 and
 opentelemetry-semconv from 1.27.0-alpha to 1.29.0-alpha (#17000)

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
---
 CHANGELOG.md                                                  | 2 ++
 gradle/libs.versions.toml                                     | 4 ++--
 plugins/telemetry-otel/build.gradle                           | 1 +
 .../telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 | 1 -
 .../telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 | 1 +
 .../opentelemetry-api-incubator-1.41.0-alpha.jar.sha1         | 1 -
 .../opentelemetry-api-incubator-1.46.0-alpha.jar.sha1         | 1 +
 .../licenses/opentelemetry-context-1.41.0.jar.sha1            | 1 -
 .../licenses/opentelemetry-context-1.46.0.jar.sha1            | 1 +
 .../licenses/opentelemetry-exporter-common-1.41.0.jar.sha1    | 1 -
 .../licenses/opentelemetry-exporter-common-1.46.0.jar.sha1    | 1 +
 .../licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1   | 1 -
 .../licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1   | 1 +
 .../licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1      | 1 -
 .../licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1      | 1 +
 .../opentelemetry-exporter-otlp-common-1.41.0.jar.sha1        | 1 -
 .../opentelemetry-exporter-otlp-common-1.46.0.jar.sha1        | 1 +
 .../opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1      | 1 -
 .../opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1      | 1 +
 .../telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 | 1 -
 .../telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 | 1 +
 .../licenses/opentelemetry-sdk-common-1.41.0.jar.sha1         | 1 -
 .../licenses/opentelemetry-sdk-common-1.46.0.jar.sha1         | 1 +
 .../licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1           | 1 -
 .../licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1           | 1 +
 .../licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1        | 1 -
 .../licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1        | 1 +
 .../licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1          | 1 -
 .../licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1          | 1 +
 .../licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1      | 1 -
 .../licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1      | 1 +
 31 files changed, 19 insertions(+), 16 deletions(-)
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1
 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1
 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 20e6c03d5a9d6..e20fda7bfdb18 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,6 +61,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919))
 - Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951))
 - Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952))
+- Bump `opentelemetry` from 1.41.0 to 1.46.0 ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
+- Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index f357fb248520c..1cd2f8d87e1d4 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -78,8 +78,8 @@ jzlib             = "1.1.3"
 resteasy          = "6.2.4.Final"
 
 # opentelemetry dependencies
-opentelemetry         = "1.41.0"
-opentelemetrysemconv  = "1.27.0-alpha"
+opentelemetry         = "1.46.0"
+opentelemetrysemconv  = "1.29.0-alpha"
 
 # arrow dependencies
 arrow                 = "17.0.0"
diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle
index 3aba7d64cd96d..54f4f2f897562 100644
--- a/plugins/telemetry-otel/build.gradle
+++ b/plugins/telemetry-otel/build.gradle
@@ -88,6 +88,7 @@ thirdPartyAudit {
     'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider',
     'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener',
     'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider',
+    'io.opentelemetry.sdk.autoconfigure.spi.internal.DefaultConfigProperties',
     'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties'
   )
 }
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1
deleted file mode 100644
index ead8fb235fa12..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ec5ad3b420c9fba4b340e85a3199fd0f2accd023
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..b2d1d3575fcde
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1
@@ -0,0 +1 @@
+afd2d5781454088400cceabbe84f7a9b29d27161
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1
deleted file mode 100644
index b601a4fb5246f..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fd387313cc37a6e93062e9a80a2526634d22cb19
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1
new file mode 100644
index 0000000000000..e89de4cb29f16
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1
@@ -0,0 +1 @@
+1a708444d2818ac1a47767a2b35d74ef55d26af8
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1
deleted file mode 100644
index 74b7cb25cdfe5..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3d7cf15ef425053e24e825160ca7b4ac08d721aa
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..df658f4c87ac2
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1
@@ -0,0 +1 @@
+8cee1fa7ec9129f7b252595c612c19f4570d567f
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1
deleted file mode 100644
index d8d8f75850cb6..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cf92f4c1b60c2359c12f6f323f6a2a623c333910
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..e6503871bff53
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1
@@ -0,0 +1 @@
+2e2d8f3b51b1a2b1184f11d9059e129c5e39147a
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1
deleted file mode 100644
index 3e1212943f894..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8dee21440b811004ecc1c36c1cd44f9d3494546c
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..65757fff8b0e7
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1
@@ -0,0 +1 @@
+a0ef76a383a086b812395ca5a5cdf94804a59a3f
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1
deleted file mode 100644
index 21a29cc8445e5..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d86e60b6d49e389ebe5797d42a7288a20d30c162
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..0fc550e83748e
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1
@@ -0,0 +1 @@
+1122a5ea0562147547ddf0eb28e1035d549c0ea0
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1
deleted file mode 100644
index ae522ac698aa8..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-aeba3075b8dfd97779edadc0a3711d999bb0e396
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..a01f85d9e1258
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1
@@ -0,0 +1 @@
+abeb93b8b6d2cb0007b1d6122325f94a11e61ca4
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1
deleted file mode 100644
index a741d0a167d60..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-368d7905d6a0a313c63e3a91f895a3a08500519e
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..8c755281bab05
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1
@@ -0,0 +1 @@
+32a0fe0fa7cd9831b502075f27c1fe6d28280cdb
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1
deleted file mode 100644
index 972e7de1c74be..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c740e8f7d0d914d6acd310ac53901bb8753c6e8d
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..a41c756db7096
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1
@@ -0,0 +1 @@
+b3a77fff1084177c4f5099bbb7db6181d6efd752
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1
deleted file mode 100644
index c56ca0b9e8169..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b820861f85ba83db0ad896c47f723208d7473d5a
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..1bd211a143c03
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1
@@ -0,0 +1 @@
+1d353ee4e980ff77c742350fc7000b732b6c6b3f
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1
deleted file mode 100644
index 39db6cb73727f..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f88ee292f5605c87dfe85c8d90131bce9f0b3b8e
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..084a703a4d4cc
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1
@@ -0,0 +1 @@
+1bd9bb4f3ce9ac573613b353a78d51491cd02bbd
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1
deleted file mode 100644
index 6dcd496e033d3..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d1200befb28e3e9f61073ac3de23cc55e509dc7
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..1fe3c4842d41d
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1
@@ -0,0 +1 @@
+475d900ffd0567a7ddf2452290b2e5d51ac35c58
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1
deleted file mode 100644
index 161e400f87077..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d9bbc2e2e800317d72fbf3141ae8391e95fa6229
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1
new file mode 100644
index 0000000000000..da00b35812afb
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1
@@ -0,0 +1 @@
+c6e39faabf0741780189861156d0a7763e942796
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1
deleted file mode 100644
index e986b4b53388e..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-906d916bee46f60260c09314284b5948c54a0662
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1
new file mode 100644
index 0000000000000..3326c366cb4c9
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1
@@ -0,0 +1 @@
+613d7f7743eb2b974680ad1af1685802e6a7cb58
\ No newline at end of file

From fccd6c54c14dabc46483f1b6ec3f3b02d08edfdd Mon Sep 17 00:00:00 2001
From: kkewwei <kewei.11@bytedance.com>
Date: Sat, 11 Jan 2025 06:45:52 +0800
Subject: [PATCH 39/61] Simplify TransportBulkAction.doRun() (#16950)

Signed-off-by: kkewwei <kewei.11@bytedance.com>
Signed-off-by: kkewwei <kkewwei@163.com>
---
 .../action/bulk/TransportBulkAction.java      | 23 +++++++------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java
index 19ffb12859183..db509afb68da9 100644
--- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java
@@ -532,6 +532,8 @@ protected void doRun() {
             }
             final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver);
             Metadata metadata = clusterState.metadata();
+            // go over all the requests and create a ShardId -> Operations mapping
+            Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
             for (int i = 0; i < bulkRequest.requests.size(); i++) {
                 DocWriteRequest<?> docWriteRequest = bulkRequest.requests.get(i);
                 // the request can only be null because we set it to null in the previous step, so it gets ignored
@@ -587,6 +589,12 @@ protected void doRun() {
                         default:
                             throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]");
                     }
+
+                    ShardId shardId = clusterService.operationRouting()
+                        .indexShards(clusterState, concreteIndex.getName(), docWriteRequest.id(), docWriteRequest.routing())
+                        .shardId();
+                    List<BulkItemRequest> shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>());
+                    shardRequests.add(new BulkItemRequest(i, docWriteRequest));
                 } catch (OpenSearchParseException | IllegalArgumentException | RoutingMissingException e) {
                     BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.id(), e);
                     BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure);
@@ -596,21 +604,6 @@ protected void doRun() {
                 }
             }
 
-            // first, go over all the requests and create a ShardId -> Operations mapping
-            Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
-            for (int i = 0; i < bulkRequest.requests.size(); i++) {
-                DocWriteRequest<?> request = bulkRequest.requests.get(i);
-                if (request == null) {
-                    continue;
-                }
-                String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName();
-                ShardId shardId = clusterService.operationRouting()
-                    .indexShards(clusterState, concreteIndex, request.id(), request.routing())
-                    .shardId();
-                List<BulkItemRequest> shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>());
-                shardRequests.add(new BulkItemRequest(i, request));
-            }
-
             if (requestsByShard.isEmpty()) {
                 BulkItemResponse[] response = responses.toArray(new BulkItemResponse[responses.length()]);
                 long tookMillis = buildTookInMillis(startTimeNanos);
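
The refactor above folds the separate shard-routing loop into the main validation loop, so each request is routed and grouped as soon as it passes validation. The grouping idiom itself is a plain Map.computeIfAbsent accumulation; the minimal sketch below uses Integer and String as stand-ins for ShardId and BulkItemRequest (placeholders, not the OpenSearch types).

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the single-pass "ShardId -> operations" grouping done above.
// Integer stands in for ShardId and String for BulkItemRequest; both are placeholders.
public class ShardGroupingSketch {
    public static void main(String[] args) {
        List<String> requests = List.of("index doc1", "delete doc2", "index doc3");
        Map<Integer, List<String>> requestsByShard = new HashMap<>();
        for (int i = 0; i < requests.size(); i++) {
            int shardId = i % 2; // placeholder for operationRouting().indexShards(...).shardId()
            requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()).add(requests.get(i));
        }
        System.out.println(requestsByShard); // e.g. {0=[index doc1, index doc3], 1=[delete doc2]}
    }
}
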

From 8d5e1a3972ac34d769fff6618d26f9f9e36b06b7 Mon Sep 17 00:00:00 2001
From: Ivan Brusic <ivan@brusic.com>
Date: Fri, 10 Jan 2025 22:16:34 -0800
Subject: [PATCH 40/61] Show only intersecting buckets to the Adjacency matrix
 aggregation (#11733)

Signed-off-by: Ivan Brusic <ivan@brusic.com>
---
 .../70_adjacency_matrix.yml                   | 37 +++++++++
 .../AdjacencyMatrixAggregationBuilder.java    | 82 +++++++++++++++++--
 .../adjacency/AdjacencyMatrixAggregator.java  | 19 +++--
 .../AdjacencyMatrixAggregatorFactory.java     | 16 +++-
 ...djacencyMatrixAggregationBuilderTests.java | 21 ++++-
 .../metrics/AdjacencyMatrixTests.java         | 18 ++++
 6 files changed, 177 insertions(+), 16 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml
index f8fa537ed91bf..ccd194eff6f51 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml
@@ -125,3 +125,40 @@ setup:
 
   - match: { aggregations.conns.buckets.3.doc_count: 1 }
   - match: { aggregations.conns.buckets.3.key: "4" }
+
+
+---
+"Show only intersections":
+  - skip:
+      version: " - 2.99.99"
+      reason: "show_only_intersecting was added in 3.0.0"
+      features: node_selector
+  - do:
+      node_selector:
+        version: "3.0.0 - "
+      search:
+        index: test
+        rest_total_hits_as_int: true
+        body:
+          size: 0
+          aggs:
+            conns:
+              adjacency_matrix:
+                show_only_intersecting: true
+                filters:
+                  1:
+                    term:
+                      num: 1
+                  2:
+                    term:
+                      num: 2
+                  4:
+                    term:
+                      num: 4
+
+  - match: { hits.total: 3 }
+
+  - length: { aggregations.conns.buckets: 1 }
+
+  - match: { aggregations.conns.buckets.0.doc_count: 1 }
+  - match: { aggregations.conns.buckets.0.key: "1&2" }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java
index 743d0023364fa..1b6a7e1158b83 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.search.aggregations.bucket.adjacency;
 
+import org.opensearch.Version;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -71,7 +72,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde
 
     private static final ParseField SEPARATOR_FIELD = new ParseField("separator");
     private static final ParseField FILTERS_FIELD = new ParseField("filters");
+    private static final ParseField SHOW_ONLY_INTERSECTING = new ParseField("show_only_intersecting");
+
     private List<KeyedFilter> filters;
+    private boolean showOnlyIntersecting = false;
     private String separator = DEFAULT_SEPARATOR;
 
     private static final ObjectParser<AdjacencyMatrixAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
@@ -81,6 +85,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde
     static {
         PARSER.declareString(AdjacencyMatrixAggregationBuilder::separator, SEPARATOR_FIELD);
         PARSER.declareNamedObjects(AdjacencyMatrixAggregationBuilder::setFiltersAsList, KeyedFilter.PARSER, FILTERS_FIELD);
+        PARSER.declareBoolean(
+            AdjacencyMatrixAggregationBuilder::setShowOnlyIntersecting,
+            AdjacencyMatrixAggregationBuilder.SHOW_ONLY_INTERSECTING
+        );
     }
 
     public static AggregationBuilder parse(XContentParser parser, String name) throws IOException {
@@ -115,6 +123,7 @@ protected AdjacencyMatrixAggregationBuilder(
         super(clone, factoriesBuilder, metadata);
         this.filters = new ArrayList<>(clone.filters);
         this.separator = clone.separator;
+        this.showOnlyIntersecting = clone.showOnlyIntersecting;
     }
 
     @Override
@@ -138,6 +147,40 @@ public AdjacencyMatrixAggregationBuilder(String name, String separator, Map<Stri
         setFiltersAsMap(filters);
     }
 
+    /**
+     * @param name
+     *            the name of this aggregation
+     * @param filters
+     *            the filters and their key to use with this aggregation.
+     * @param showOnlyIntersecting
+     *            show only the buckets formed by the intersection of multiple filters
+     */
+    public AdjacencyMatrixAggregationBuilder(String name, Map<String, QueryBuilder> filters, boolean showOnlyIntersecting) {
+        this(name, DEFAULT_SEPARATOR, filters, showOnlyIntersecting);
+    }
+
+    /**
+     * @param name
+     *            the name of this aggregation
+     * @param separator
+     *            the string used to separate keys in intersections buckets e.g.
+     *            &amp; character for keyed filters A and B would return an
+     *            intersection bucket named A&amp;B
+     * @param filters
+     *            the filters and their key to use with this aggregation.
+     * @param showOnlyIntersecting
+     *            show only the buckets formed by the intersection of multiple filters
+     */
+    public AdjacencyMatrixAggregationBuilder(
+        String name,
+        String separator,
+        Map<String, QueryBuilder> filters,
+        boolean showOnlyIntersecting
+    ) {
+        this(name, separator, filters);
+        this.showOnlyIntersecting = showOnlyIntersecting;
+    }
+
     /**
      * Read from a stream.
      */
@@ -145,6 +188,9 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException {
         super(in);
         int filtersSize = in.readVInt();
         separator = in.readString();
+        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+            showOnlyIntersecting = in.readBoolean();
+        }
         filters = new ArrayList<>(filtersSize);
         for (int i = 0; i < filtersSize; i++) {
             filters.add(new KeyedFilter(in));
@@ -155,6 +201,9 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException {
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeVInt(filters.size());
         out.writeString(separator);
+        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+            out.writeBoolean(showOnlyIntersecting);
+        }
         for (KeyedFilter keyedFilter : filters) {
             keyedFilter.writeTo(out);
         }
@@ -185,6 +234,11 @@ private AdjacencyMatrixAggregationBuilder setFiltersAsList(List<KeyedFilter> fil
         return this;
     }
 
+    public AdjacencyMatrixAggregationBuilder setShowOnlyIntersecting(boolean showOnlyIntersecting) {
+        this.showOnlyIntersecting = showOnlyIntersecting;
+        return this;
+    }
+
     /**
      * Set the separator used to join pairs of bucket keys
      */
@@ -214,6 +268,10 @@ public Map<String, QueryBuilder> filters() {
         return result;
     }
 
+    public boolean isShowOnlyIntersecting() {
+        return showOnlyIntersecting;
+    }
+
     @Override
     protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
         boolean modified = false;
@@ -224,7 +282,9 @@ protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryS
             rewrittenFilters.add(new KeyedFilter(kf.key(), rewritten));
         }
         if (modified) {
-            return new AdjacencyMatrixAggregationBuilder(name).separator(separator).setFiltersAsList(rewrittenFilters);
+            return new AdjacencyMatrixAggregationBuilder(name).separator(separator)
+                .setFiltersAsList(rewrittenFilters)
+                .setShowOnlyIntersecting(showOnlyIntersecting);
         }
         return this;
     }
@@ -245,7 +305,16 @@ protected AggregatorFactory doBuild(QueryShardContext queryShardContext, Aggrega
                     + "] index level setting."
             );
         }
-        return new AdjacencyMatrixAggregatorFactory(name, filters, separator, queryShardContext, parent, subFactoriesBuilder, metadata);
+        return new AdjacencyMatrixAggregatorFactory(
+            name,
+            filters,
+            showOnlyIntersecting,
+            separator,
+            queryShardContext,
+            parent,
+            subFactoriesBuilder,
+            metadata
+        );
     }
 
     @Override
@@ -257,7 +326,8 @@ public BucketCardinality bucketCardinality() {
     protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(SEPARATOR_FIELD.getPreferredName(), separator);
-        builder.startObject(AdjacencyMatrixAggregator.FILTERS_FIELD.getPreferredName());
+        builder.field(SHOW_ONLY_INTERSECTING.getPreferredName(), showOnlyIntersecting);
+        builder.startObject(FILTERS_FIELD.getPreferredName());
         for (KeyedFilter keyedFilter : filters) {
             builder.field(keyedFilter.key(), keyedFilter.filter());
         }
@@ -268,7 +338,7 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param
 
     @Override
     public int hashCode() {
-        return Objects.hash(super.hashCode(), filters, separator);
+        return Objects.hash(super.hashCode(), filters, showOnlyIntersecting, separator);
     }
 
     @Override
@@ -277,7 +347,9 @@ public boolean equals(Object obj) {
         if (obj == null || getClass() != obj.getClass()) return false;
         if (super.equals(obj) == false) return false;
         AdjacencyMatrixAggregationBuilder other = (AdjacencyMatrixAggregationBuilder) obj;
-        return Objects.equals(filters, other.filters) && Objects.equals(separator, other.separator);
+        return Objects.equals(filters, other.filters)
+            && Objects.equals(separator, other.separator)
+            && Objects.equals(showOnlyIntersecting, other.showOnlyIntersecting);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java
index ef1795f425240..f82ee9dc242fb 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java
@@ -36,7 +36,6 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.opensearch.common.lucene.Lucene;
-import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -70,8 +69,6 @@
  */
 public class AdjacencyMatrixAggregator extends BucketsAggregator {
 
-    public static final ParseField FILTERS_FIELD = new ParseField("filters");
-
     /**
      * A keyed filter
      *
@@ -145,6 +142,8 @@ public boolean equals(Object obj) {
 
     private final String[] keys;
     private final Weight[] filters;
+
+    private final boolean showOnlyIntersecting;
     private final int totalNumKeys;
     private final int totalNumIntersections;
     private final String separator;
@@ -155,6 +154,7 @@ public AdjacencyMatrixAggregator(
         String separator,
         String[] keys,
         Weight[] filters,
+        boolean showOnlyIntersecting,
         SearchContext context,
         Aggregator parent,
         Map<String, Object> metadata
@@ -163,6 +163,7 @@ public AdjacencyMatrixAggregator(
         this.separator = separator;
         this.keys = keys;
         this.filters = filters;
+        this.showOnlyIntersecting = showOnlyIntersecting;
         this.totalNumIntersections = ((keys.length * keys.length) - keys.length) / 2;
         this.totalNumKeys = keys.length + totalNumIntersections;
     }
@@ -177,10 +178,12 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc
         return new LeafBucketCollectorBase(sub, null) {
             @Override
             public void collect(int doc, long bucket) throws IOException {
-                // Check each of the provided filters
-                for (int i = 0; i < bits.length; i++) {
-                    if (bits[i].get(doc)) {
-                        collectBucket(sub, doc, bucketOrd(bucket, i));
+                if (!showOnlyIntersecting) {
+                    // Check each of the provided filters
+                    for (int i = 0; i < bits.length; i++) {
+                        if (bits[i].get(doc)) {
+                            collectBucket(sub, doc, bucketOrd(bucket, i));
+                        }
                     }
                 }
                 // Check all the possible intersections of the provided filters
@@ -229,7 +232,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I
             for (int i = 0; i < keys.length; i++) {
                 long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], i);
                 long docCount = bucketDocCount(bucketOrd);
-                // Empty buckets are not returned because this aggregation will commonly be used under a
+                // Empty buckets are not returned because this aggregation will commonly be used under
                 // a date-histogram where we will look for transactions over time and can expect many
                 // empty buckets.
                 if (docCount > 0) {
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java
index 99ffb563ba2a8..bae86f3fcdfc1 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java
@@ -57,11 +57,14 @@ public class AdjacencyMatrixAggregatorFactory extends AggregatorFactory {
 
     private final String[] keys;
     private final Weight[] weights;
+
+    private final boolean showOnlyIntersecting;
     private final String separator;
 
     public AdjacencyMatrixAggregatorFactory(
         String name,
         List<KeyedFilter> filters,
+        boolean showOnlyIntersecting,
         String separator,
         QueryShardContext queryShardContext,
         AggregatorFactory parent,
@@ -79,6 +82,7 @@ public AdjacencyMatrixAggregatorFactory(
             Query filter = keyedFilter.filter().toQuery(queryShardContext);
             weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f);
         }
+        this.showOnlyIntersecting = showOnlyIntersecting;
     }
 
     @Override
@@ -88,7 +92,17 @@ public Aggregator createInternal(
         CardinalityUpperBound cardinality,
         Map<String, Object> metadata
     ) throws IOException {
-        return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, metadata);
+        return new AdjacencyMatrixAggregator(
+            name,
+            factories,
+            separator,
+            keys,
+            weights,
+            showOnlyIntersecting,
+            searchContext,
+            parent,
+            metadata
+        );
     }
 
     @Override
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java
index b2025ae5f03c1..e7c1de0123c9e 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java
@@ -57,7 +57,7 @@
 public class AdjacencyMatrixAggregationBuilderTests extends OpenSearchTestCase {
 
     public void testFilterSizeLimitation() throws Exception {
-        // filter size grater than max size should thrown a exception
+        // filter size greater than max size should throw an exception
         QueryShardContext queryShardContext = mock(QueryShardContext.class);
         IndexShard indexShard = mock(IndexShard.class);
         Settings settings = Settings.builder()
@@ -94,7 +94,7 @@ public void testFilterSizeLimitation() throws Exception {
             )
         );
 
-        // filter size not grater than max size should return an instance of AdjacencyMatrixAggregatorFactory
+        // filter size not greater than max size should return an instance of AdjacencyMatrixAggregatorFactory
         Map<String, QueryBuilder> emptyFilters = Collections.emptyMap();
 
         AdjacencyMatrixAggregationBuilder aggregationBuilder = new AdjacencyMatrixAggregationBuilder("dummy", emptyFilters);
@@ -106,4 +106,21 @@ public void testFilterSizeLimitation() throws Exception {
                 + "removed in a future release! See the breaking changes documentation for the next major version."
         );
     }
+
+    public void testShowOnlyIntersecting() throws Exception {
+        QueryShardContext queryShardContext = mock(QueryShardContext.class);
+
+        Map<String, QueryBuilder> filters = new HashMap<>(3);
+        for (int i = 0; i < 2; i++) {
+            QueryBuilder queryBuilder = mock(QueryBuilder.class);
+            // return builder itself to skip rewrite
+            when(queryBuilder.rewrite(queryShardContext)).thenReturn(queryBuilder);
+            filters.put("filter" + i, queryBuilder);
+        }
+        AdjacencyMatrixAggregationBuilder builder = new AdjacencyMatrixAggregationBuilder("dummy", filters, true);
+        assertTrue(builder.isShowOnlyIntersecting());
+
+        builder = new AdjacencyMatrixAggregationBuilder("dummy", filters, false);
+        assertFalse(builder.isShowOnlyIntersecting());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java
index c5cf56f6caff7..38e53d65a69e6 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java
@@ -68,4 +68,22 @@ public void testFiltersSameMap() {
         assertEquals(original, builder.filters());
         assert original != builder.filters();
     }
+
+    public void testShowOnlyIntersecting() {
+        Map<String, QueryBuilder> original = new HashMap<>();
+        original.put("bbb", new MatchNoneQueryBuilder());
+        original.put("aaa", new MatchNoneQueryBuilder());
+        AdjacencyMatrixAggregationBuilder builder;
+        builder = new AdjacencyMatrixAggregationBuilder("my-agg", "&", original, true);
+        assertTrue(builder.isShowOnlyIntersecting());
+    }
+
+    public void testShowOnlyIntersectingAsFalse() {
+        Map<String, QueryBuilder> original = new HashMap<>();
+        original.put("bbb", new MatchNoneQueryBuilder());
+        original.put("aaa", new MatchNoneQueryBuilder());
+        AdjacencyMatrixAggregationBuilder builder;
+        builder = new AdjacencyMatrixAggregationBuilder("my-agg", original, false);
+        assertFalse(builder.isShowOnlyIntersecting());
+    }
 }
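
Beyond the YAML test, the new flag is also reachable through the Java builder added in this patch. A small usage sketch follows, assuming an OpenSearch server/client classpath; the field name "num" simply mirrors the test fixture above, and the surrounding class and main method are illustrative only.

import java.util.Map;

import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder;

// Sketch: request only the intersection buckets via the constructor/setter added in this patch.
public class AdjacencyMatrixUsageSketch {
    public static void main(String[] args) {
        Map<String, QueryBuilder> filters = Map.of(
            "1", QueryBuilders.termQuery("num", 1),
            "2", QueryBuilders.termQuery("num", 2),
            "4", QueryBuilders.termQuery("num", 4)
        );

        // New three-argument constructor: name, filters, showOnlyIntersecting.
        AdjacencyMatrixAggregationBuilder conns = new AdjacencyMatrixAggregationBuilder("conns", filters, true);

        // Equivalent fluent form using the new setter.
        AdjacencyMatrixAggregationBuilder fluent = new AdjacencyMatrixAggregationBuilder("conns", filters)
            .setShowOnlyIntersecting(true);

        System.out.println(conns.isShowOnlyIntersecting());  // true
        System.out.println(fluent.isShowOnlyIntersecting()); // true
    }
}

Either form suppresses the per-filter buckets and keeps only the "1&2"-style intersection buckets, matching the behavior asserted in the REST test above.
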

From 26465e87ed60acc8fd7c65e3559b14e8ccd59fbf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 13 Jan 2025 10:41:36 -0500
Subject: [PATCH 41/61] Bump com.google.re2j:re2j from 1.7 to 1.8 in
 /plugins/repository-hdfs (#17012)

* Bump com.google.re2j:re2j from 1.7 to 1.8 in /plugins/repository-hdfs

Bumps [com.google.re2j:re2j](https://github.com/google/re2j) from 1.7 to 1.8.
- [Release notes](https://github.com/google/re2j/releases)
- [Commits](https://github.com/google/re2j/compare/re2j-1.7...re2j-1.8)

---
updated-dependencies:
- dependency-name: com.google.re2j:re2j
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Updating SHAs

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                                       | 1 +
 plugins/repository-hdfs/build.gradle               | 2 +-
 plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 | 1 -
 plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e20fda7bfdb18..5b07e527ac712 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -63,6 +63,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952))
 - Bump `opentelemetry` from 1.41.0 to 1.46.0 ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
 - Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
+- Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 441c6ae998406..c2685a525c8ba 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -77,7 +77,7 @@ dependencies {
   api 'org.apache.commons:commons-configuration2:2.11.0'
   api "commons-io:commons-io:${versions.commonsio}"
   api 'org.apache.commons:commons-lang3:3.17.0'
-  implementation 'com.google.re2j:re2j:1.7'
+  implementation 'com.google.re2j:re2j:1.8'
   api 'javax.servlet:servlet-api:2.5'
   api "org.slf4j:slf4j-api:${versions.slf4j}"
   api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
diff --git a/plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 b/plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1
deleted file mode 100644
index eb858e3677e30..0000000000000
--- a/plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2949632c1b4acce0d7784f28e3152e9cf3c2ec7a
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 b/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1
new file mode 100644
index 0000000000000..8887078965f56
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1
@@ -0,0 +1 @@
+12c25e923e9e4fb1575a7640a2698745c6f19a94
\ No newline at end of file

From f98f4267c35ec2d5567f189944dda5671425e46e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 13 Jan 2025 11:09:34 -0500
Subject: [PATCH 42/61] Bump com.nimbusds:oauth2-oidc-sdk from 11.20.1 to 11.21
 in /plugins/repository-azure (#17010)

* Bump com.nimbusds:oauth2-oidc-sdk in /plugins/repository-azure

Bumps [com.nimbusds:oauth2-oidc-sdk](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions) from 11.20.1 to 11.21.
- [Changelog](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/src/master/CHANGELOG.txt)
- [Commits](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/branches/compare/11.21..11.20.1)

---
updated-dependencies:
- dependency-name: com.nimbusds:oauth2-oidc-sdk
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Updating SHAs

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                                                    | 2 +-
 plugins/repository-azure/build.gradle                           | 2 +-
 .../repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1  | 1 -
 .../repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1    | 1 +
 4 files changed, 3 insertions(+), 3 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5b07e527ac712..f1866ea07a352 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,7 +55,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855))
 - Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857))
 - Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856))
-- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895))
+- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.21 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895), [#17010](https://github.com/opensearch-project/OpenSearch/pull/17010))
 - Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896))
 - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918))
 - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919))
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index c6b303f22112e..332651e37cfa4 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -62,7 +62,7 @@ dependencies {
   api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
   api "net.java.dev.jna:jna-platform:${versions.jna}"
   api 'com.microsoft.azure:msal4j:1.18.0'
-  api 'com.nimbusds:oauth2-oidc-sdk:11.20.1'
+  api 'com.nimbusds:oauth2-oidc-sdk:11.21'
   api 'com.nimbusds:nimbus-jose-jwt:9.41.1'
   api 'com.nimbusds:content-type:2.3'
   api 'com.nimbusds:lang-tag:1.7'
diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1
deleted file mode 100644
index 7527d31eb1d37..0000000000000
--- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8d1ecd62d31945534a7cd63062c3c48ff0df9c43
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1
new file mode 100644
index 0000000000000..9736182141a0a
--- /dev/null
+++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1
@@ -0,0 +1 @@
+97bec173d2a199fdd7f5c1f3a61f7ccc2e992fc1
\ No newline at end of file

From a609e634a348b76386fb11936bbe8c4b38ea72d0 Mon Sep 17 00:00:00 2001
From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com>
Date: Mon, 13 Jan 2025 22:12:13 +0100
Subject: [PATCH 43/61] improve `PhoneNumberAnalyzerTests#testTelPrefixSearch`
 (#17016)

This way we ensure that the analyzer does not emit any additional, unwanted tokens.

This is a follow-up to commit 4d943993ac9 / #16993.

Signed-off-by: Ralph Ursprung <Ralph.Ursprung@avaloq.com>
---
 .../opensearch/analysis/phone/PhoneNumberAnalyzerTests.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java
index d55c0b2ce7d2a..503cee9cc710f 100644
--- a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java
+++ b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java
@@ -159,11 +159,11 @@ public void testSipWithoutDomainPart() throws IOException {
     }
 
     public void testTelPrefix() throws IOException {
-        assertTokensInclude("tel:+1228", Arrays.asList("1228", "122", "228"));
+        assertTokensInclude(phoneAnalyzer, "tel:+1228", Arrays.asList("tel:+1228", "tel:", "1228", "122", "228"));
     }
 
     public void testTelPrefixSearch() throws IOException {
-        assertTokensInclude("tel:+1228", Arrays.asList("1228"));
+        assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+1228", Arrays.asList("tel:+1228", "1228"));
     }
 
     public void testNumberPrefix() throws IOException {

From f9c239d340423099699be52aa1594ef37e35005f Mon Sep 17 00:00:00 2001
From: Michael Froh <froh@amazon.com>
Date: Tue, 14 Jan 2025 15:53:51 -0800
Subject: [PATCH 44/61] Filter shards for sliced search at coordinator (#16771)

* Filter shards for sliced search at coordinator

Prior to this commit, a sliced search would fan out to every shard,
then apply a MatchNoDocsQuery filter on shards that don't correspond
to the current slice. This still creates a (useless) search context
on each shard for every slice, though. For a long-running sliced
scroll, this can quickly exhaust the number of available scroll
contexts.

This change avoids fanning out to all the shards by checking at the
coordinator if a shard is matched by the current slice. This should
reduce the number of open scroll contexts to max(numShards, numSlices)
instead of numShards * numSlices.
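
As an illustration of the slice-to-shard assignment described above, the following is a
minimal, self-contained sketch (not part of this patch; the class name is hypothetical)
of the arithmetic that the new SliceBuilder#shardMatches method applies at the
coordinator. With 7 shards and 3 slices it reproduces the groups exercised by the new
20_slice.yml REST test below: slice 0 -> shards 0, 3, 6; slice 1 -> shards 1, 4;
slice 2 -> shards 2, 5.

    import java.util.ArrayList;
    import java.util.List;

    public final class SliceShardFilterSketch {

        // Mirrors SliceBuilder#shardMatches: should the shard at the given ordinal
        // (0-based, after sorting the per-index shard iterators) serve this slice?
        static boolean shardMatches(int sliceId, int maxSlices, int shardOrdinal, int numShards) {
            if (maxSlices >= numShards) {
                // More slices than shards: slices are distributed over shards.
                return sliceId % numShards == shardOrdinal;
            }
            // More shards than slices: shards are distributed over slices.
            return shardOrdinal % maxSlices == sliceId;
        }

        public static void main(String[] args) {
            int numShards = 7;
            int maxSlices = 3;
            for (int sliceId = 0; sliceId < maxSlices; sliceId++) {
                List<Integer> shards = new ArrayList<>();
                for (int shard = 0; shard < numShards; shard++) {
                    if (shardMatches(sliceId, maxSlices, shard, numShards)) {
                        shards.add(shard);
                    }
                }
                // Prints: slice 0 -> [0, 3, 6], slice 1 -> [1, 4], slice 2 -> [2, 5]
                System.out.println("slice " + sliceId + " -> " + shards);
            }
        }
    }

Only the shards a slice matches receive a search context, which is why the number of
open contexts drops to max(numShards, numSlices).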

---------

Signed-off-by: Michael Froh <froh@amazon.com>
---
 CHANGELOG.md                                  |  1 +
 .../rest-api-spec/api/search_shards.json      |  3 +
 .../test/search_shards/20_slice.yml           | 88 +++++++++++++++++++
 .../shards/ClusterSearchShardsRequest.java    | 28 +++++-
 .../TransportClusterSearchShardsAction.java   |  2 +-
 ...TransportFieldCapabilitiesIndexAction.java |  3 +-
 .../action/search/TransportSearchAction.java  | 11 ++-
 .../cluster/routing/OperationRouting.java     | 39 ++++++--
 .../RestClusterSearchShardsAction.java        |  8 ++
 .../opensearch/search/slice/SliceBuilder.java | 39 ++++----
 .../search/TransportSearchActionTests.java    |  5 ++
 .../routing/OperationRoutingTests.java        | 38 ++++----
 12 files changed, 219 insertions(+), 46 deletions(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f1866ea07a352..9fd5efdc986d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
 - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707))
+- Sliced search only fans out to shards matched by the selected slice, reducing open search contexts ([#16771](https://github.com/opensearch-project/OpenSearch/pull/16771))
 - Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909))
 - Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881))
 - Limit reader writer separation to remote store enabled clusters [#16760](https://github.com/opensearch-project/OpenSearch/pull/16760)
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json
index 74b7055b4c4b0..9d3d420e8945c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json
@@ -62,6 +62,9 @@
         "default":"open",
         "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both."
       }
+    },
+    "body":{
+      "description":"The search source (in order to specify slice parameters)"
     }
   }
 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml
new file mode 100644
index 0000000000000..bf1a5429213df
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml
@@ -0,0 +1,88 @@
+---
+"Search shards with slice specified in body":
+  - skip:
+      version: " - 2.99.99"
+      reason: "Added slice body to search_shards in 2.19"
+  - do:
+      indices.create:
+        index: test_index
+        body:
+          settings:
+            index:
+              number_of_shards: 7
+              number_of_replicas: 0
+
+  - do:
+      search_shards:
+        index: test_index
+        body:
+          slice:
+            id: 0
+            max: 3
+  - length: { shards: 3 }
+  - match: { shards.0.0.index: "test_index" }
+  - match: { shards.0.0.shard: 0 }
+  - match: { shards.1.0.shard: 3 }
+  - match: { shards.2.0.shard: 6 }
+
+  - do:
+      search_shards:
+        index: test_index
+        body:
+          slice:
+            id: 1
+            max: 3
+  - length: { shards: 2 }
+  - match: { shards.0.0.index: "test_index" }
+  - match: { shards.0.0.shard: 1 }
+  - match: { shards.1.0.shard: 4 }
+
+  - do:
+      search_shards:
+        index: test_index
+        body:
+          slice:
+            id: 2
+            max: 3
+  - length: { shards: 2 }
+  - match: { shards.0.0.index: "test_index" }
+  - match: { shards.0.0.shard: 2 }
+  - match: { shards.1.0.shard: 5 }
+
+
+  - do:
+      search_shards:
+        index: test_index
+        preference: "_shards:0,2,4,6"
+        body:
+          slice:
+            id: 0
+            max: 3
+  - length: { shards: 2 }
+  - match: { shards.0.0.index: "test_index" }
+  - match: { shards.0.0.shard: 0 }
+  - match: { shards.1.0.shard: 6 }
+
+  - do:
+      search_shards:
+        index: test_index
+        preference: "_shards:0,2,4,6"
+        body:
+          slice:
+            id: 1
+            max: 3
+  - length: { shards: 1 }
+  - match: { shards.0.0.index: "test_index" }
+  - match: { shards.0.0.shard: 2 }
+
+  - do:
+      search_shards:
+        index: test_index
+        preference: "_shards:0,2,4,6"
+        body:
+          slice:
+            id: 2
+            max: 3
+  - length: { shards: 1 }
+  - match: { shards.0.0.index: "test_index" }
+  - match: { shards.0.0.shard: 4 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
index 62e05ebb37e28..d4bf0efbd3eb5 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.shards;
 
+import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
@@ -41,6 +42,7 @@
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.search.slice.SliceBuilder;
 
 import java.io.IOException;
 import java.util.Objects;
@@ -61,6 +63,8 @@ public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest<Cl
     @Nullable
     private String preference;
     private IndicesOptions indicesOptions = IndicesOptions.lenientExpandOpen();
+    @Nullable
+    private SliceBuilder sliceBuilder;
 
     public ClusterSearchShardsRequest() {}
 
@@ -76,6 +80,12 @@ public ClusterSearchShardsRequest(StreamInput in) throws IOException {
         preference = in.readOptionalString();
 
         indicesOptions = IndicesOptions.readIndicesOptions(in);
+        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+            boolean hasSlice = in.readBoolean();
+            if (hasSlice) {
+                sliceBuilder = new SliceBuilder(in);
+            }
+        }
     }
 
     @Override
@@ -84,8 +94,15 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeStringArray(indices);
         out.writeOptionalString(routing);
         out.writeOptionalString(preference);
-
         indicesOptions.writeIndicesOptions(out);
+        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+            if (sliceBuilder != null) {
+                out.writeBoolean(true);
+                sliceBuilder.writeTo(out);
+            } else {
+                out.writeBoolean(false);
+            }
+        }
     }
 
     @Override
@@ -166,4 +183,13 @@ public ClusterSearchShardsRequest preference(String preference) {
     public String preference() {
         return this.preference;
     }
+
+    public ClusterSearchShardsRequest slice(SliceBuilder sliceBuilder) {
+        this.sliceBuilder = sliceBuilder;
+        return this;
+    }
+
+    public SliceBuilder slice() {
+        return this.sliceBuilder;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
index 83e104236f640..11323499efd8b 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
@@ -133,7 +133,7 @@ protected void clusterManagerOperation(
 
         Set<String> nodeIds = new HashSet<>();
         GroupShardsIterator<ShardIterator> groupShardsIterator = clusterService.operationRouting()
-            .searchShards(clusterState, concreteIndices, routingMap, request.preference());
+            .searchShards(clusterState, concreteIndices, routingMap, request.preference(), null, null, request.slice());
         ShardRouting shard;
         ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
         int currentGroup = 0;
diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
index 10bf4975311d6..52937182e6a63 100644
--- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
+++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
@@ -247,8 +247,7 @@ private AsyncShardsAction(FieldCapabilitiesIndexRequest request, ActionListener<
                 throw blockException;
             }
 
-            shardsIt = clusterService.operationRouting()
-                .searchShards(clusterService.state(), new String[] { request.index() }, null, null, null, null);
+            shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), new String[] { request.index() }, null, null);
         }
 
         public void start() {
diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java
index 8c4927afa9a14..dfec2e1fda738 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java
@@ -85,6 +85,7 @@
 import org.opensearch.search.pipeline.SearchPipelineService;
 import org.opensearch.search.profile.ProfileShardResult;
 import org.opensearch.search.profile.SearchProfileShardResults;
+import org.opensearch.search.slice.SliceBuilder;
 import org.opensearch.tasks.CancellableTask;
 import org.opensearch.tasks.Task;
 import org.opensearch.tasks.TaskResourceTrackingService;
@@ -551,6 +552,7 @@ private ActionListener<SearchSourceBuilder> buildRewriteListener(
                     );
                 } else {
                     AtomicInteger skippedClusters = new AtomicInteger(0);
+                    SliceBuilder slice = searchRequest.source() == null ? null : searchRequest.source().slice();
                     collectSearchShards(
                         searchRequest.indicesOptions(),
                         searchRequest.preference(),
@@ -559,6 +561,7 @@ private ActionListener<SearchSourceBuilder> buildRewriteListener(
                         remoteClusterIndices,
                         remoteClusterService,
                         threadPool,
+                        slice,
                         ActionListener.wrap(searchShardsResponses -> {
                             final BiFunction<String, String, DiscoveryNode> clusterNodeLookup = getRemoteClusterNodeLookup(
                                 searchShardsResponses
@@ -787,6 +790,7 @@ static void collectSearchShards(
         Map<String, OriginalIndices> remoteIndicesByCluster,
         RemoteClusterService remoteClusterService,
         ThreadPool threadPool,
+        SliceBuilder slice,
         ActionListener<Map<String, ClusterSearchShardsResponse>> listener
     ) {
         final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size());
@@ -800,7 +804,8 @@ static void collectSearchShards(
             ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices).indicesOptions(indicesOptions)
                 .local(true)
                 .preference(preference)
-                .routing(routing);
+                .routing(routing)
+                .slice(slice);
             clusterClient.admin()
                 .cluster()
                 .searchShards(
@@ -1042,6 +1047,7 @@ private void executeSearch(
                 concreteLocalIndices[i] = indices[i].getName();
             }
             Map<String, Long> nodeSearchCounts = searchTransportService.getPendingSearchRequests();
+            SliceBuilder slice = searchRequest.source() == null ? null : searchRequest.source().slice();
             GroupShardsIterator<ShardIterator> localShardRoutings = clusterService.operationRouting()
                 .searchShards(
                     clusterState,
@@ -1049,7 +1055,8 @@ private void executeSearch(
                     routingMap,
                     searchRequest.preference(),
                     searchService.getResponseCollectorService(),
-                    nodeSearchCounts
+                    nodeSearchCounts,
+                    slice
                 );
             localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false)
                 .map(it -> new SearchShardIterator(searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices))
diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java
index fe9e00b250e70..eac6f41acde4c 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.cluster.routing;
 
+import org.apache.lucene.util.CollectionUtil;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.WeightedRoutingMetadata;
@@ -44,14 +45,17 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.node.ResponseCollectorService;
+import org.opensearch.search.slice.SliceBuilder;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -230,7 +234,7 @@ public GroupShardsIterator<ShardIterator> searchShards(
         @Nullable Map<String, Set<String>> routing,
         @Nullable String preference
     ) {
-        return searchShards(clusterState, concreteIndices, routing, preference, null, null);
+        return searchShards(clusterState, concreteIndices, routing, preference, null, null, null);
     }
 
     public GroupShardsIterator<ShardIterator> searchShards(
@@ -239,11 +243,14 @@ public GroupShardsIterator<ShardIterator> searchShards(
         @Nullable Map<String, Set<String>> routing,
         @Nullable String preference,
         @Nullable ResponseCollectorService collectorService,
-        @Nullable Map<String, Long> nodeCounts
+        @Nullable Map<String, Long> nodeCounts,
+        @Nullable SliceBuilder slice
     ) {
         final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
-        final Set<ShardIterator> set = new HashSet<>(shards.size());
+
+        Map<Index, List<ShardIterator>> shardIterators = new HashMap<>();
         for (IndexShardRoutingTable shard : shards) {
+
             IndexMetadata indexMetadataForShard = indexMetadata(clusterState, shard.shardId.getIndex().getName());
             if (indexMetadataForShard.isRemoteSnapshot() && (preference == null || preference.isEmpty())) {
                 preference = Preference.PRIMARY.type();
@@ -274,10 +281,31 @@ public GroupShardsIterator<ShardIterator> searchShards(
                 clusterState.metadata().weightedRoutingMetadata()
             );
             if (iterator != null) {
-                set.add(iterator);
+                shardIterators.computeIfAbsent(iterator.shardId().getIndex(), k -> new ArrayList<>()).add(iterator);
+            }
+        }
+        List<ShardIterator> allShardIterators = new ArrayList<>();
+        if (slice != null) {
+            for (List<ShardIterator> indexIterators : shardIterators.values()) {
+                // Filter the returned shards for the given slice
+                CollectionUtil.timSort(indexIterators);
+                // We use the ordinal of the iterator in the group (after sorting) rather than the shard id, because
+                // computeTargetedShards may return a subset of shards for an index, if a routing parameter was
+                // specified. In that case, the set of routable shards is considered the full universe of available
+                // shards for each index, when mapping shards to slices. If no routing parameter was specified,
+                // then ordinals and shard IDs are the same. This mimics the logic in
+                // org.opensearch.search.slice.SliceBuilder.toFilter.
+                for (int i = 0; i < indexIterators.size(); i++) {
+                    if (slice.shardMatches(i, indexIterators.size())) {
+                        allShardIterators.add(indexIterators.get(i));
+                    }
+                }
             }
+        } else {
+            shardIterators.values().forEach(allShardIterators::addAll);
         }
-        return GroupShardsIterator.sortAndCreate(new ArrayList<>(set));
+
+        return GroupShardsIterator.sortAndCreate(allShardIterators);
     }
 
     public static ShardIterator getShards(ClusterState clusterState, ShardId shardId) {
@@ -311,6 +339,7 @@ private Set<IndexShardRoutingTable> computeTargetedShards(
                     set.add(indexShard);
                 }
             }
+
         }
         return set;
     }
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java
index 3555576433104..304d1cabefd35 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java
@@ -40,6 +40,7 @@
 import org.opensearch.rest.BaseRestHandler;
 import org.opensearch.rest.RestRequest;
 import org.opensearch.rest.action.RestToXContentListener;
+import org.opensearch.search.builder.SearchSourceBuilder;
 
 import java.io.IOException;
 import java.util.List;
@@ -81,6 +82,13 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         clusterSearchShardsRequest.routing(request.param("routing"));
         clusterSearchShardsRequest.preference(request.param("preference"));
         clusterSearchShardsRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions()));
+        if (request.hasContentOrSourceParam()) {
+            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+            sourceBuilder.parseXContent(request.contentOrSourceParamParser());
+            if (sourceBuilder.slice() != null) {
+                clusterSearchShardsRequest.slice(sourceBuilder.slice());
+            }
+        }
         return channel -> client.admin().cluster().searchShards(clusterSearchShardsRequest, new RestToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java
index c9b8a896ed525..691b829578e1f 100644
--- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java
+++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java
@@ -214,6 +214,15 @@ public int hashCode() {
         return Objects.hash(this.field, this.id, this.max);
     }
 
+    public boolean shardMatches(int shardOrdinal, int numShards) {
+        if (max >= numShards) {
+            // Slices are distributed over shards
+            return id % numShards == shardOrdinal;
+        }
+        // Shards are distributed over slices
+        return shardOrdinal % max == id;
+    }
+
     /**
      * Converts this QueryBuilder to a lucene {@link Query}.
      *
@@ -225,7 +234,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request,
             throw new IllegalArgumentException("field " + field + " not found");
         }
 
-        int shardId = request.shardId().id();
+        int shardOrdinal = request.shardId().id();
         int numShards = context.getIndexSettings().getNumberOfShards();
         if ((request.preference() != null || request.indexRoutings().length > 0)) {
             GroupShardsIterator<ShardIterator> group = buildShardIterator(clusterService, request);
@@ -241,21 +250,26 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request,
                  */
                 numShards = group.size();
                 int ord = 0;
-                shardId = -1;
+                shardOrdinal = -1;
                 // remap the original shard id with its index (position) in the sorted shard iterator.
                 for (ShardIterator it : group) {
                     assert it.shardId().getIndex().equals(request.shardId().getIndex());
                     if (request.shardId().equals(it.shardId())) {
-                        shardId = ord;
+                        shardOrdinal = ord;
                         break;
                     }
                     ++ord;
                 }
-                assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing";
+                assert shardOrdinal != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing";
             }
         }
 
-        String field = this.field;
+        if (shardMatches(shardOrdinal, numShards) == false) {
+            // We should have already excluded this shard before routing to it.
+            // If we somehow land here, then we match nothing.
+            return new MatchNoDocsQuery("this shard is not part of the slice");
+        }
+
         boolean useTermQuery = false;
         if ("_uid".equals(field)) {
             throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead");
@@ -277,12 +291,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request,
             // the number of slices is greater than the number of shards
             // in such case we can reduce the number of requested shards by slice
 
-            // first we check if the slice is responsible of this shard
             int targetShard = id % numShards;
-            if (targetShard != shardId) {
-                // the shard is not part of this slice, we can skip it.
-                return new MatchNoDocsQuery("this shard is not part of the slice");
-            }
             // compute the number of slices where this shard appears
             int numSlicesInShard = max / numShards;
             int rest = max % numShards;
@@ -301,14 +310,8 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request,
                 ? new TermsSliceQuery(field, shardSlice, numSlicesInShard)
                 : new DocValuesSliceQuery(field, shardSlice, numSlicesInShard);
         }
-        // the number of shards is greater than the number of slices
+        // the number of shards is greater than the number of slices. If we target this shard, we target all of it.
 
-        // check if the shard is assigned to the slice
-        int targetSlice = shardId % max;
-        if (id != targetSlice) {
-            // the shard is not part of this slice, we can skip it.
-            return new MatchNoDocsQuery("this shard is not part of the slice");
-        }
         return new MatchAllDocsQuery();
     }
 
@@ -321,6 +324,8 @@ private GroupShardsIterator<ShardIterator> buildShardIterator(ClusterService clu
         Map<String, Set<String>> routingMap = request.indexRoutings().length > 0
             ? Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings()))
             : null;
+        // Note that we do *not* want to filter this set of shard IDs based on the slice, since we want the
+        // full set of shards matched by the routing parameters.
         return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference());
     }
 
diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java
index 84955d01a59ce..0a0015ae8cbf6 100644
--- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java
+++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java
@@ -809,6 +809,7 @@ public void testCollectSearchShards() throws Exception {
                     remoteIndicesByCluster,
                     remoteClusterService,
                     threadPool,
+                    null,
                     new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)
                 );
                 awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -835,6 +836,7 @@ public void testCollectSearchShards() throws Exception {
                     remoteIndicesByCluster,
                     remoteClusterService,
                     threadPool,
+                    null,
                     new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)
                 );
                 awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -880,6 +882,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
                     remoteIndicesByCluster,
                     remoteClusterService,
                     threadPool,
+                    null,
                     new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)
                 );
                 awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -907,6 +910,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
                     remoteIndicesByCluster,
                     remoteClusterService,
                     threadPool,
+                    null,
                     new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)
                 );
                 awaitLatch(latch, 5, TimeUnit.SECONDS);
@@ -949,6 +953,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
                     remoteIndicesByCluster,
                     remoteClusterService,
                     threadPool,
+                    null,
                     new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)
                 );
                 awaitLatch(latch, 5, TimeUnit.SECONDS);
diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
index aaeeb52ab5709..4263e1aa347dc 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
@@ -604,7 +604,8 @@ public void testAdaptiveReplicaSelection() throws Exception {
             null,
             null,
             collector,
-            outstandingRequests
+            outstandingRequests,
+            null
         );
 
         assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards));
@@ -616,7 +617,7 @@ public void testAdaptiveReplicaSelection() throws Exception {
         searchedShards.add(firstChoice);
         selectedNodes.add(firstChoice.currentNodeId());
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         assertThat(groupIterator.size(), equalTo(numIndices * numShards));
         ShardRouting secondChoice = groupIterator.get(0).nextOrNull();
@@ -624,7 +625,7 @@ public void testAdaptiveReplicaSelection() throws Exception {
         searchedShards.add(secondChoice);
         selectedNodes.add(secondChoice.currentNodeId());
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         assertThat(groupIterator.size(), equalTo(numIndices * numShards));
         ShardRouting thirdChoice = groupIterator.get(0).nextOrNull();
@@ -643,26 +644,26 @@ public void testAdaptiveReplicaSelection() throws Exception {
         outstandingRequests.put("node_1", 1L);
         outstandingRequests.put("node_2", 1L);
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         ShardRouting shardChoice = groupIterator.get(0).nextOrNull();
         // node 1 should be the lowest ranked node to start
         assertThat(shardChoice.currentNodeId(), equalTo("node_1"));
 
         // node 1 starts getting more loaded...
         collector.addNodeStatistics("node_1", 2, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         shardChoice = groupIterator.get(0).nextOrNull();
         assertThat(shardChoice.currentNodeId(), equalTo("node_1"));
 
         // and more loaded...
         collector.addNodeStatistics("node_1", 3, TimeValue.timeValueMillis(250).nanos(), TimeValue.timeValueMillis(200).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         shardChoice = groupIterator.get(0).nextOrNull();
         assertThat(shardChoice.currentNodeId(), equalTo("node_1"));
 
         // and even more
         collector.addNodeStatistics("node_1", 4, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         shardChoice = groupIterator.get(0).nextOrNull();
         // finally, node 2 is chosen instead
         assertThat(shardChoice.currentNodeId(), equalTo("node_2"));
@@ -709,7 +710,8 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except
             null,
             null,
             collector,
-            outstandingRequests
+            outstandingRequests,
+            null
         );
         assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards));
 
@@ -722,7 +724,7 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except
         searchedShards.add(firstChoice);
         selectedNodes.add(firstChoice.currentNodeId());
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         assertThat(groupIterator.size(), equalTo(numIndices * numShards));
         assertThat(groupIterator.get(0).size(), equalTo(numReplicas + 1));
@@ -745,18 +747,18 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except
         outstandingRequests.put("node_a1", 1L);
         outstandingRequests.put("node_b2", 1L);
 
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         // node_a0 or node_a1 should be the lowest ranked node to start
         groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_a")));
 
         // Adding more load to node_a0
         collector.addNodeStatistics("node_a0", 10, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
         // Adding more load to node_a0 and node_a1 from zone-a
         collector.addNodeStatistics("node_a1", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
         collector.addNodeStatistics("node_a0", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
-        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+        groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
         // ARS should pick node_b2 from zone-b since both node_a0 and node_a1 are overloaded
         groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_b")));
 
@@ -842,8 +844,8 @@ public void testWeightedOperationRouting() throws Exception {
                 null,
                 null,
                 collector,
-                outstandingRequests
-
+                outstandingRequests,
+                null
             );
 
             for (ShardIterator it : groupIterator) {
@@ -871,7 +873,7 @@ public void testWeightedOperationRouting() throws Exception {
             opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
 
             // search shards call
-            groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+            groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
             for (ShardIterator it : groupIterator) {
                 List<ShardRouting> shardRoutings = Collections.singletonList(it.nextOrNull());
@@ -935,8 +937,8 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
                 null,
                 null,
                 collector,
-                outstandingRequests
-
+                outstandingRequests,
+                null
             );
 
             for (ShardIterator it : groupIterator) {
@@ -969,7 +971,7 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
 
             // search shards call
-            groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
+            groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null);
 
             for (ShardIterator it : groupIterator) {
                 while (it.remaining() > 0) {

From fa4595cf853f2f55b6a4ffc9f653330f6a25688d Mon Sep 17 00:00:00 2001
From: kkewwei <kewei.11@bytedance.com>
Date: Wed, 15 Jan 2025 23:25:55 +0800
Subject: [PATCH 45/61] Upgrade HttpCore5/HttpClient5 to support
 ExtendedSocketOption in HttpAsyncClient (#16757)

* Upgrade HttpCore5/HttpClient5 to support ExtendedSocketOption in HttpAsyncClient (a condensed usage sketch follows after this list)

Signed-off-by: kkewwei <kewei.11@bytedance.com>
Signed-off-by: kkewwei <kkewwei@163.com>

* Use the Upgrade flow by default

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Update Reactor Netty to 1.1.26.Final

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Add SETTING_H2C_MAX_CONTENT_LENGTH to configure h2cMaxContentLength for reactor-netty4 transport

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>

* Update Apache HttpCore5 to 5.3.2

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
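
As a condensed usage sketch of what this upgrade enables (the hostname, port, and timing
values below are placeholders; the same calls appear in the RestClientDocumentation change
further down in this patch), a low-level REST client can now tune TCP keep-alive through
IOReactorConfig:

    import org.apache.hc.core5.http.HttpHost;
    import org.apache.hc.core5.reactor.IOReactorConfig;
    import org.opensearch.client.RestClient;
    import org.opensearch.client.RestClientBuilder;

    public final class KeepAliveClientSketch {
        public static void main(String[] args) {
            RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200))
                .setHttpClientConfigCallback(httpClientBuilder ->
                    // The Tcp* setters are the extended socket options this upgrade unlocks.
                    httpClientBuilder.setIOReactorConfig(
                        IOReactorConfig.custom()
                            .setTcpKeepIdle(200)      // idle seconds before keep-alive probes start
                            .setTcpKeepInterval(10)   // seconds between probes
                            .setTcpKeepCount(10)      // probes before the connection is dropped
                            .build()));
            try (RestClient client = builder.build()) {
                // issue requests with the configured client here
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }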

---------

Signed-off-by: kkewwei <kewei.11@bytedance.com>
Signed-off-by: kkewwei <kkewwei@163.com>
Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
Co-authored-by: Andriy Redko <andriy.redko@aiven.io>
---
 CHANGELOG-3.0.md                               |  1 +
 .../org/opensearch/bootstrap/test.policy       | 11 +++++++++++
 .../rest/licenses/httpclient5-5.3.1.jar.sha1   |  1 -
 .../rest/licenses/httpclient5-5.4.1.jar.sha1   |  1 +
 client/rest/licenses/httpcore5-5.2.5.jar.sha1  |  1 -
 client/rest/licenses/httpcore5-5.3.2.jar.sha1  |  1 +
 .../rest/licenses/httpcore5-h2-5.2.5.jar.sha1  |  1 -
 .../rest/licenses/httpcore5-h2-5.3.2.jar.sha1  |  1 +
 .../licenses/httpcore5-reactive-5.2.5.jar.sha1 |  1 -
 .../licenses/httpcore5-reactive-5.3.2.jar.sha1 |  1 +
 .../client/RestClientBuilderTests.java         |  7 +++++++
 .../client/RestClientSingleHostIntegTests.java | 10 +++++++++-
 .../documentation/RestClientDocumentation.java | 18 ++++++++++++++++++
 .../licenses/httpclient5-5.3.1.jar.sha1        |  1 -
 .../licenses/httpclient5-5.4.1.jar.sha1        |  1 +
 .../sniffer/licenses/httpcore5-5.2.5.jar.sha1  |  1 -
 .../sniffer/licenses/httpcore5-5.3.2.jar.sha1  |  1 +
 gradle/libs.versions.toml                      |  6 +++---
 .../reactor-netty-core-1.1.23.jar.sha1         |  1 -
 .../reactor-netty-core-1.1.26.jar.sha1         |  1 +
 .../reactor-netty-http-1.1.23.jar.sha1         |  1 -
 .../reactor-netty-http-1.1.26.jar.sha1         |  1 +
 .../reactor-netty-core-1.1.23.jar.sha1         |  1 -
 .../reactor-netty-core-1.1.26.jar.sha1         |  1 +
 .../reactor-netty-http-1.1.23.jar.sha1         |  1 -
 .../reactor-netty-http-1.1.26.jar.sha1         |  1 +
 .../ReactorNetty4HttpServerTransport.java      | 17 +++++++++++++++++
 .../transport/reactor/ReactorNetty4Plugin.java |  2 +-
 .../opensearch/bootstrap/test-framework.policy |  2 +-
 29 files changed, 78 insertions(+), 16 deletions(-)
 create mode 100644 client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy
 delete mode 100644 client/rest/licenses/httpclient5-5.3.1.jar.sha1
 create mode 100644 client/rest/licenses/httpclient5-5.4.1.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore5-5.2.5.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-5.3.2.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1
 delete mode 100644 client/sniffer/licenses/httpclient5-5.3.1.jar.sha1
 create mode 100644 client/sniffer/licenses/httpclient5-5.4.1.jar.sha1
 delete mode 100644 client/sniffer/licenses/httpcore5-5.2.5.jar.sha1
 create mode 100644 client/sniffer/licenses/httpcore5-5.3.2.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1
 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1
 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1
 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1
 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1

diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index 48d978bede420..fddead96aaf45 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957))
 
 ### Dependencies
+- Bump Apache HttpCore5/HttpClient5 dependencies from 5.2.5/5.3.1 to 5.3.2/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757))
 
 ### Changed
 - Changed locale provider from COMPAT to CLDR  ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345))
diff --git a/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy b/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy
new file mode 100644
index 0000000000000..2604c2492d8ab
--- /dev/null
+++ b/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy
@@ -0,0 +1,11 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+  permission java.net.SocketPermission "*", "connect,resolve";
+};
diff --git a/client/rest/licenses/httpclient5-5.3.1.jar.sha1 b/client/rest/licenses/httpclient5-5.3.1.jar.sha1
deleted file mode 100644
index c8f32c1ec23a1..0000000000000
--- a/client/rest/licenses/httpclient5-5.3.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-56b53c8f4bcdaada801d311cf2ff8a24d6d96883
\ No newline at end of file
diff --git a/client/rest/licenses/httpclient5-5.4.1.jar.sha1 b/client/rest/licenses/httpclient5-5.4.1.jar.sha1
new file mode 100644
index 0000000000000..40156e9a42620
--- /dev/null
+++ b/client/rest/licenses/httpclient5-5.4.1.jar.sha1
@@ -0,0 +1 @@
+ce913081e592ee8eeee35c4e577d7dce13cba7a4
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-5.2.5.jar.sha1
deleted file mode 100644
index ca97e8612ea39..0000000000000
--- a/client/rest/licenses/httpcore5-5.2.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dab1e18842971a45ca8942491ce005ab86a028d7
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore5-5.3.2.jar.sha1 b/client/rest/licenses/httpcore5-5.3.2.jar.sha1
new file mode 100644
index 0000000000000..44c13325b5647
--- /dev/null
+++ b/client/rest/licenses/httpcore5-5.3.2.jar.sha1
@@ -0,0 +1 @@
+35d387301d4a719972b15fbe863020da5f913c22
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1
deleted file mode 100644
index bb40fe65854f6..0000000000000
--- a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-09425df4d1365cee86a8e031a036bdca4343da4b
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1
new file mode 100644
index 0000000000000..67c92d8fea09c
--- /dev/null
+++ b/client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1
@@ -0,0 +1 @@
+d908a946e9161511accdc739e443b1e0b0cbba82
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1
deleted file mode 100644
index ab9241fc93d45..0000000000000
--- a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f68949965075b957c12b4c1ef89fd4bab2a0fdb1
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1
new file mode 100644
index 0000000000000..345d71cb206ae
--- /dev/null
+++ b/client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1
@@ -0,0 +1 @@
+9ee35ef1d3e40855695fc87ad2e31192d85c1e88
\ No newline at end of file
diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java
index 7165174e688e1..c9ad10a476f74 100644
--- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java
+++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java
@@ -37,6 +37,7 @@
 import org.apache.hc.core5.http.Header;
 import org.apache.hc.core5.http.HttpHost;
 import org.apache.hc.core5.http.message.BasicHeader;
+import org.apache.hc.core5.reactor.IOReactorConfig;
 import org.apache.hc.core5.util.Timeout;
 
 import java.io.IOException;
@@ -143,6 +144,12 @@ public void testBuild() throws IOException {
             builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                 @Override
                 public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
+                    IOReactorConfig.Builder iOReactorConfig = IOReactorConfig.custom();
+                    iOReactorConfig.setTcpKeepCount(randomIntBetween(4, 10));
+                    iOReactorConfig.setTcpKeepInterval(randomIntBetween(5, 10));
+                    iOReactorConfig.setTcpKeepIdle(randomIntBetween(100, 200));
+                    iOReactorConfig.setIoThreadCount(2);
+                    httpClientBuilder.setIOReactorConfig(iOReactorConfig.build());
                     return httpClientBuilder;
                 }
             });
diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java
index de04dd843b2db..84f6e7c8beb2e 100644
--- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java
+++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java
@@ -382,6 +382,10 @@ public void testHeaders() throws Exception {
             if (method.equals("HEAD") == false) {
                 standardHeaders.add("Content-length");
             }
+            if (method.equals("HEAD") == true || method.equals("GET") == true || method.equals("OPTIONS") == true) {
+                standardHeaders.add("Upgrade");
+            }
+
             final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
             final int statusCode = randomStatusCode(getRandom());
             Request request = new Request(method, "/" + statusCode);
@@ -400,11 +404,15 @@ public void testHeaders() throws Exception {
             assertEquals(method, esResponse.getRequestLine().getMethod());
             assertEquals(statusCode, esResponse.getStatusLine().getStatusCode());
             assertEquals(pathPrefix + "/" + statusCode, esResponse.getRequestLine().getUri());
+
             assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), standardHeaders);
+            final Set<String> removedHeaders = new HashSet<>();
             for (final Header responseHeader : esResponse.getHeaders()) {
                 String name = responseHeader.getName();
-                if (name.startsWith("Header") == false) {
+                // Some headers could be returned multiple times in the response, e.g. Connection.
+                if (name.startsWith("Header") == false && removedHeaders.contains(name) == false) {
                     assertTrue("unknown header was returned " + name, standardHeaders.remove(name));
+                    removedHeaders.add(name);
                 }
             }
             assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty());
diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java
index 42c31864e0578..d9c82307cae8a 100644
--- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java
+++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java
@@ -376,6 +376,24 @@ public HttpAsyncClientBuilder customizeHttpClient(
                 });
             //end::rest-client-config-threads
         }
+        {
+            //tag::rest-client-config-tcpKeepIdle/tcpKeepInterval/tcpKeepCount
+            RestClientBuilder builder = RestClient.builder(
+                    new HttpHost("localhost", 9200))
+                    .setHttpClientConfigCallback(new HttpClientConfigCallback() {
+                        @Override
+                        public HttpAsyncClientBuilder customizeHttpClient(
+                                HttpAsyncClientBuilder httpClientBuilder) {
+                            return httpClientBuilder.setIOReactorConfig(
+                                    IOReactorConfig.custom()
+                                            .setTcpKeepIdle(200)
+                                            .setTcpKeepInterval(10)
+                                            .setTcpKeepCount(10)
+                                            .build());
+                        }
+                    });
+            //end::rest-client-config-tcpKeepIdle/tcpKeepInterval/tcpKeepCount
+        }
         {
             //tag::rest-client-config-basic-auth
             final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
diff --git a/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1
deleted file mode 100644
index c8f32c1ec23a1..0000000000000
--- a/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-56b53c8f4bcdaada801d311cf2ff8a24d6d96883
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpclient5-5.4.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.4.1.jar.sha1
new file mode 100644
index 0000000000000..40156e9a42620
--- /dev/null
+++ b/client/sniffer/licenses/httpclient5-5.4.1.jar.sha1
@@ -0,0 +1 @@
+ce913081e592ee8eeee35c4e577d7dce13cba7a4
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1
deleted file mode 100644
index ca97e8612ea39..0000000000000
--- a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dab1e18842971a45ca8942491ce005ab86a028d7
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpcore5-5.3.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.3.2.jar.sha1
new file mode 100644
index 0000000000000..44c13325b5647
--- /dev/null
+++ b/client/sniffer/licenses/httpcore5-5.3.2.jar.sha1
@@ -0,0 +1 @@
+35d387301d4a719972b15fbe863020da5f913c22
\ No newline at end of file
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 1cd2f8d87e1d4..96f6178295f00 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -37,12 +37,12 @@ joda              = "2.12.7"
 roaringbitmap     = "1.3.0"
 
 # project reactor
-reactor_netty     = "1.1.23"
+reactor_netty     = "1.1.26"
 reactor           = "3.5.20"
 
 # client dependencies
-httpclient5       = "5.3.1"
-httpcore5         = "5.2.5"
+httpclient5       = "5.4.1"
+httpcore5         = "5.3.2"
 httpclient        = "4.5.14"
 httpcore          = "4.4.16"
 httpasyncclient   = "4.1.5"
diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1
deleted file mode 100644
index 8f56bb5165fa3..0000000000000
--- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7059b0c18ab7aa0fa9e08b48cb6a20b15c11478
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1
new file mode 100644
index 0000000000000..e64cc3645514f
--- /dev/null
+++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1
@@ -0,0 +1 @@
+05a8c6004161a4c1a9c0639b05387baab6efaa32
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1
deleted file mode 100644
index 5bb3136f99e93..0000000000000
--- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-94b294fa90aee2e88ad4337251e278aaac21362c
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1
new file mode 100644
index 0000000000000..035d2fb1c4c4c
--- /dev/null
+++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1
@@ -0,0 +1 @@
+41682e517e2808fc469d6b2b85fea48d0a7fe73b
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1
deleted file mode 100644
index 8f56bb5165fa3..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7059b0c18ab7aa0fa9e08b48cb6a20b15c11478
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1
new file mode 100644
index 0000000000000..e64cc3645514f
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1
@@ -0,0 +1 @@
+05a8c6004161a4c1a9c0639b05387baab6efaa32
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1
deleted file mode 100644
index 5bb3136f99e93..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-94b294fa90aee2e88ad4337251e278aaac21362c
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1
new file mode 100644
index 0000000000000..035d2fb1c4c4c
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1
@@ -0,0 +1 @@
+41682e517e2808fc469d6b2b85fea48d0a7fe73b
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
index 3dcee4e8ec045..77648ed7e785c 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
@@ -18,6 +18,7 @@
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.common.util.net.NetUtils;
+import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.http.AbstractHttpServerTransport;
@@ -87,6 +88,19 @@ public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTranspor
     private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components";
     private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("opensearch.net.mtu", "1500")));
 
+    /**
+     * Configure the maximum length of the content of the HTTP/2.0 clear-text upgrade request.
+     * By default the server will reject an upgrade request with non-empty content,
+     * because the upgrade request is most likely a GET request. If the client sends
+     * a non-GET upgrade request, {@link #h2cMaxContentLength} specifies the maximum
+     * length of the content of the upgrade request.
+     */
+    public static final Setting<ByteSizeValue> SETTING_H2C_MAX_CONTENT_LENGTH = Setting.byteSizeSetting(
+        "h2c.max_content_length",
+        new ByteSizeValue(65536, ByteSizeUnit.KB),
+        Property.NodeScope
+    );
+
     /**
      * The number of Reactor Netty HTTP workers
      */
@@ -133,6 +147,7 @@ public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTranspor
     private final ByteSizeValue maxInitialLineLength;
     private final ByteSizeValue maxHeaderSize;
     private final ByteSizeValue maxChunkSize;
+    private final ByteSizeValue h2cMaxContentLength;
     private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider;
     private volatile SharedGroupFactory.SharedGroup sharedGroup;
     private volatile DisposableServer disposableServer;
@@ -208,6 +223,7 @@ public ReactorNetty4HttpServerTransport(
         this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
         this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
         this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
+        this.h2cMaxContentLength = SETTING_H2C_MAX_CONTENT_LENGTH.get(settings);
         this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
         this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider;
     }
@@ -228,6 +244,7 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Excepti
                 .compress(true)
                 .httpRequestDecoder(
                     spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt())
+                        .h2cMaxContentLength(h2cMaxContentLength.bytesAsInt())
                         .maxHeaderSize(maxHeaderSize.bytesAsInt())
                         .maxInitialLineLength(maxInitialLineLength.bytesAsInt())
                         .allowPartialChunks(false)
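
For illustration only (not applied by this patch), a minimal sketch of how the new `h2c.max_content_length` setting resolves from node settings; the override value is an assumption, the default from the patch is 65536 KB (64 MB):

    // Illustrative sketch: resolve the byte-size setting added above.
    Settings settings = Settings.builder()
        .put("h2c.max_content_length", "32mb")   // assumed override; default is 65536kb
        .build();
    ByteSizeValue h2cMax = ReactorNetty4HttpServerTransport.SETTING_H2C_MAX_CONTENT_LENGTH.get(settings);
    // h2cMax.getBytes() == 32 * 1024 * 1024
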
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
index 6e5b0215b58a4..90ed1fe729d3a 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
@@ -57,7 +57,7 @@ public ReactorNetty4Plugin() {}
      */
     @Override
     public List<Setting<?>> getSettings() {
-        return Arrays.asList(/* no setting registered since we're picking the onces from Netty 4 transport */);
+        return Arrays.asList(ReactorNetty4HttpServerTransport.SETTING_H2C_MAX_CONTENT_LENGTH);
     }
 
     /**
diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy
index c62adda511140..e1a3b4618035e 100644
--- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy
+++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy
@@ -120,7 +120,7 @@ grant codeBase "${codebase.httpcore5}" {
 
 grant codeBase "${codebase.httpclient5}" {
   // httpclient5 makes socket connections for rest tests
-  permission java.net.SocketPermission "*", "connect";
+  permission java.net.SocketPermission "*", "connect,resolve";
 };
 
 grant codeBase "${codebase.httpcore-nio}" {

From 73e11af21c440c326f3685f5b13b359cf41bba92 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Wed, 15 Jan 2025 11:51:10 -0500
Subject: [PATCH 46/61] Update version checks for backport (#17030)

Signed-off-by: Michael Froh <froh@amazon.com>
Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
Co-authored-by: Michael Froh <froh@amazon.com>
---
 .../test/search.aggregation/70_adjacency_matrix.yml         | 6 +++---
 .../bucket/adjacency/AdjacencyMatrixAggregationBuilder.java | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml
index ccd194eff6f51..8b1956c6152d2 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml
@@ -130,12 +130,12 @@ setup:
 ---
 "Show only intersections":
   - skip:
-      version: " - 2.99.99"
-      reason: "show_only_intersecting was added in 3.0.0"
+      version: " - 2.19.0"
+      reason: "show_only_intersecting was added in 2.19.0"
       features: node_selector
   - do:
       node_selector:
-        version: "3.0.0 - "
+        version: "2.19.0 - "
       search:
         index: test
         rest_total_hits_as_int: true
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java
index 1b6a7e1158b83..e4a454ee64609 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java
@@ -188,7 +188,7 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException {
         super(in);
         int filtersSize = in.readVInt();
         separator = in.readString();
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.V_2_19_0)) {
             showOnlyIntersecting = in.readBoolean();
         }
         filters = new ArrayList<>(filtersSize);
@@ -201,7 +201,7 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException {
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeVInt(filters.size());
         out.writeString(separator);
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.V_2_19_0)) {
             out.writeBoolean(showOnlyIntersecting);
         }
         for (KeyedFilter keyedFilter : filters) {

From 6202ab08980b39b301fef305a79138d779b92f56 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Wed, 15 Jan 2025 14:09:03 -0500
Subject: [PATCH 47/61] Fix versions and breaking API changes (#17031)

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
---
 .../resources/rest-api-spec/test/search_shards/20_slice.yml   | 2 +-
 .../admin/cluster/shards/ClusterSearchShardsRequest.java      | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml
index bf1a5429213df..dafb38df20157 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml
@@ -1,7 +1,7 @@
 ---
 "Search shards with slice specified in body":
   - skip:
-      version: " - 2.99.99"
+      version: " - 2.18.99"
       reason: "Added slice body to search_shards in 2.19"
   - do:
       indices.create:
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
index d4bf0efbd3eb5..06bd4da1931de 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
@@ -80,7 +80,7 @@ public ClusterSearchShardsRequest(StreamInput in) throws IOException {
         preference = in.readOptionalString();
 
         indicesOptions = IndicesOptions.readIndicesOptions(in);
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.V_2_19_0)) {
             boolean hasSlice = in.readBoolean();
             if (hasSlice) {
                 sliceBuilder = new SliceBuilder(in);
@@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(routing);
         out.writeOptionalString(preference);
         indicesOptions.writeIndicesOptions(out);
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.V_2_19_0)) {
             if (sliceBuilder != null) {
                 out.writeBoolean(true);
                 sliceBuilder.writeTo(out);
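
For illustration only, a consolidated sketch of the wire-compatibility pattern used in both hunks above: the optional slice is serialized only when both sides are on or after the same version constant, and the read path mirrors the write path. Variable names are taken from the patch; the layout is a paraphrase, not the exact committed code.

    // Write side (guarded by the same version as the read side):
    if (out.getVersion().onOrAfter(Version.V_2_19_0)) {
        out.writeBoolean(sliceBuilder != null);
        if (sliceBuilder != null) {
            sliceBuilder.writeTo(out);
        }
    }
    // Read side:
    if (in.getVersion().onOrAfter(Version.V_2_19_0)) {
        if (in.readBoolean()) {
            sliceBuilder = new SliceBuilder(in);
        }
    }
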

From 34ef1462abd55e931961c08d21b1dfa5855b8121 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 15:11:18 -0500
Subject: [PATCH 48/61] Bump com.nimbusds:nimbus-jose-jwt from 9.47 to 10.0.1
 in /test/fixtures/hdfs-fixture (#17011)

* Bump com.nimbusds:nimbus-jose-jwt in /test/fixtures/hdfs-fixture

Bumps [com.nimbusds:nimbus-jose-jwt](https://bitbucket.org/connect2id/nimbus-jose-jwt) from 9.47 to 10.0.1.
- [Changelog](https://bitbucket.org/connect2id/nimbus-jose-jwt/src/master/CHANGELOG.txt)
- [Commits](https://bitbucket.org/connect2id/nimbus-jose-jwt/branches/compare/10.0.1..9.47)

---
updated-dependencies:
- dependency-name: com.nimbusds:nimbus-jose-jwt
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                            | 2 +-
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9fd5efdc986d1..241d88049214d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,7 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501))
 - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550))
 - Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.3.0 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612), [#16854](https://github.com/opensearch-project/OpenSearch/pull/16854))
-- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.47 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807))
+- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.1 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807), [#17011](https://github.com/opensearch-project/OpenSearch/pull/17011))
 - Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897))
 - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614))
 - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655))
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index bb2b7ebafdf81..fdbd3ed0d3571 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -79,7 +79,7 @@ dependencies {
   api "org.jboss.xnio:xnio-nio:3.8.16.Final"
   api 'org.jline:jline:3.28.0'
   api 'org.apache.commons:commons-configuration2:2.11.0'
-  api 'com.nimbusds:nimbus-jose-jwt:9.47'
+  api 'com.nimbusds:nimbus-jose-jwt:10.0.1'
   api ('org.apache.kerby:kerb-admin:2.1.0') {
     exclude group: "org.jboss.xnio"
     exclude group: "org.jline"

From 13159c1693d088417683f8d7f2a018ea7e8a6866 Mon Sep 17 00:00:00 2001
From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com>
Date: Thu, 16 Jan 2025 12:48:39 +0530
Subject: [PATCH 49/61] Remove user data from logs when not in debug/trace mode
 (#17007)

* Remove user data from logs when not in debug/trace mode

Signed-off-by: Mohit Godwani <mgodwan@amazon.com>
---
 .../test/delete_by_query/50_wait_for_active_shards.yml |  2 +-
 .../test/reindex/60_wait_for_active_shards.yml         |  2 +-
 .../test/update_by_query/50_consistency.yml            |  2 +-
 .../action/support/WaitActiveShardCountIT.java         |  4 ++--
 .../support/replication/ReplicationOperation.java      | 10 +---------
 .../replication/TransportReplicationAction.java        |  2 +-
 .../org/opensearch/action/update/UpdateHelper.java     |  7 ++++++-
 7 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml
index ea8ed4df3e748..39cf36847f25d 100644
--- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml
+++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml
@@ -25,7 +25,7 @@
             match_all: {}
 
   - match:
-      failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.+/
+      failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\]/
 
   - do:
       indices.refresh: {}
diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml
index 3498e555d2879..a580c55a95130 100644
--- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml
+++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml
@@ -25,7 +25,7 @@
           dest:
             index: dest
   - match:
-      failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)\..Timeout\:.\[1s\],.request:.+/
+      failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)\..Timeout\:.\[1s\]/
 
   - do:
       reindex:
diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml
index 4a067580b54d3..e97eacc3c9c25 100644
--- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml
+++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml
@@ -21,7 +21,7 @@
         wait_for_active_shards: 4
         timeout: 1s
   - match:
-      failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.+/
+      failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\]/
 
   - do:
       update_by_query:
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java
index 08cffac8aac5d..c4ffbccf0ab99 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java
@@ -76,7 +76,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception {
             assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
             assertThat(
                 e.getMessage(),
-                startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms], request:")
+                startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms]")
             );
             // but really, all is well
         }
@@ -120,7 +120,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception {
                 startsWith(
                     "[test][0] Not enough active copies to meet shard count of ["
                         + ActiveShardCount.ALL
-                        + "] (have 2, needed 3). Timeout: [100ms], request:"
+                        + "] (have 2, needed 3). Timeout: [100ms]"
                 )
             );
             // but really, all is well
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
index 9f69d41d83f5b..12d3502184ac4 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
@@ -141,15 +141,7 @@ public void execute() throws Exception {
         final ShardRouting primaryRouting = primary.routingEntry();
         final ShardId primaryId = primaryRouting.shardId();
         if (activeShardCountFailure != null) {
-            finishAsFailed(
-                new UnavailableShardsException(
-                    primaryId,
-                    "{} Timeout: [{}], request: [{}]",
-                    activeShardCountFailure,
-                    request.timeout(),
-                    request
-                )
-            );
+            finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}]", activeShardCountFailure, request.timeout()));
             return;
         }
 
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
index 49a96603f6802..637a7a31d78cc 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
@@ -1246,7 +1246,7 @@ void finishOnSuccess(Response response) {
         }
 
         void retryBecauseUnavailable(ShardId shardId, String message) {
-            retry(new UnavailableShardsException(shardId, "{} Timeout: [{}], request: [{}]", message, request.timeout(), request));
+            retry(new UnavailableShardsException(shardId, "{} Timeout: [{}]", message, request.timeout()));
         }
     }
 
diff --git a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java
index 19c32f9336df8..c02ec1fbb9cf0 100644
--- a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java
+++ b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java
@@ -58,6 +58,7 @@
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptService;
+import org.opensearch.script.ScriptType;
 import org.opensearch.script.UpdateScript;
 import org.opensearch.search.lookup.SourceLookup;
 
@@ -128,7 +129,11 @@ Tuple<UpdateOpType, Map<String, Object>> executeScriptedUpsert(Map<String, Objec
 
         if (operation != UpdateOpType.CREATE && operation != UpdateOpType.NONE) {
             // Only valid options for an upsert script are "create" (the default) or "none", meaning abort upsert
-            logger.warn("Invalid upsert operation [{}] for script [{}], doing nothing...", operation, script.getIdOrCode());
+            if (logger.isDebugEnabled() || ScriptType.STORED.equals(script.getType())) {
+                logger.warn("Invalid upsert operation [{}] for script [{}], doing nothing...", operation, script.getIdOrCode());
+            } else {
+                logger.warn("Invalid upsert operation [{}] for the given script", operation);
+            }
             operation = UpdateOpType.NONE;
         }
 

From a43607677517d84804d223e54d824753d0646f23 Mon Sep 17 00:00:00 2001
From: Peter Alfonsi <peter.alfonsi@gmail.com>
Date: Thu, 16 Jan 2025 11:19:28 -0800
Subject: [PATCH 50/61] [Bugfix] Fix cache maximum size settings not working
 properly with pluggable caching (#16636)

* Fix cache size setting

Signed-off-by: Peter Alfonsi <petealft@amazon.com>

* Changelog

Signed-off-by: Peter Alfonsi <petealft@amazon.com>

* Deprecate original IRC size setting

Signed-off-by: Peter Alfonsi <petealft@amazon.com>

* spotlessApply

Signed-off-by: Peter Alfonsi <petealft@amazon.com>

* Addressed Ankit's comments

Signed-off-by: Peter Alfonsi <petealft@amazon.com>

* Address Sagar's comment

Signed-off-by: Peter Alfonsi <petealft@amazon.com>

---------

Signed-off-by: Peter Alfonsi <petealft@amazon.com>
Signed-off-by: Peter Alfonsi <peter.alfonsi@gmail.com>
Signed-off-by: Ankit Jain <akjain@amazon.com>
Co-authored-by: Peter Alfonsi <petealft@amazon.com>
Co-authored-by: Ankit Jain <akjain@amazon.com>
---
 CHANGELOG.md                                  |   1 +
 .../common/tier/TieredSpilloverCache.java     |  15 ++
 .../tier/TieredSpilloverCacheSettings.java    |   6 +
 .../cache/common/tier/MockDiskCache.java      |   4 +
 .../tier/TieredSpilloverCacheTests.java       | 135 +++++++++++++++++-
 .../cache/EhcacheDiskCacheSettings.java       |   1 +
 .../cache/store/disk/EhcacheDiskCache.java    |   5 +
 .../store/disk/EhCacheDiskCacheTests.java     |  61 ++++++++
 .../common/cache/service/CacheService.java    |  22 ++-
 .../cache/store/OpenSearchOnHeapCache.java    |  14 +-
 .../OpenSearchOnHeapCacheSettings.java        |   1 +
 .../indices/IndicesRequestCache.java          |  74 ++++++----
 .../store/OpenSearchOnHeapCacheTests.java     |  75 +++++++---
 .../settings/MemorySizeSettingsTests.java     |   3 +
 .../indices/IndicesRequestCacheTests.java     |  48 +++++++
 15 files changed, 403 insertions(+), 62 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 241d88049214d..21cdf30867e74 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -95,6 +95,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827))
 - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606))
 - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335))
+- Fix max request cache size settings not working properly with pluggable caching ([#16636](https://github.com/opensearch-project/OpenSearch/pull/16636))
 - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964))
 - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868))
 - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732))
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
index 38a6915ffd10e..9879235812377 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
@@ -150,6 +150,9 @@ static class TieredSpilloverCacheSegment<K, V> implements ICache<K, V> {
 
         private final TieredSpilloverCacheStatsHolder statsHolder;
 
+        private final long onHeapCacheMaxWeight;
+        private final long diskCacheMaxWeight;
+
         /**
          * This map is used to handle concurrent requests for same key in computeIfAbsent() to ensure we load the value
          * only once.
@@ -218,6 +221,8 @@ static class TieredSpilloverCacheSegment<K, V> implements ICache<K, V> {
             cacheListMap.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK));
             this.caches = Collections.synchronizedMap(cacheListMap);
             this.policies = builder.policies; // Will never be null; builder initializes it to an empty list
+            this.onHeapCacheMaxWeight = onHeapCacheSizeInBytes;
+            this.diskCacheMaxWeight = diskCacheSizeInBytes;
         }
 
         // Package private for testing
@@ -526,6 +531,16 @@ void updateStatsOnPut(String destinationTierValue, ICacheKey<K> key, V value) {
             statsHolder.incrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value));
         }
 
+        // pkg-private for testing
+        long getOnHeapCacheMaxWeight() {
+            return onHeapCacheMaxWeight;
+        }
+
+        // pkg-private for testing
+        long getDiskCacheMaxWeight() {
+            return diskCacheMaxWeight;
+        }
+
         /**
          * A class which receives removal events from the heap tier.
          */
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
index 122d00af3bd1e..31dc1795134e4 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
@@ -85,6 +85,9 @@ public class TieredSpilloverCacheSettings {
 
     /**
      * Setting which defines the onHeap cache size to be used within tiered cache.
+     * This setting overrides size settings from the heap tier implementation.
+     * For example, if OpenSearchOnHeapCache is the heap tier in the request cache, and
+     * indices.requests.cache.opensearch_onheap.size is set, that value will be ignored in favor of this setting.
      *
      * Pattern: {cache_type}.tiered_spillover.onheap.store.size
      * Example: indices.request.cache.tiered_spillover.onheap.store.size
@@ -96,6 +99,9 @@ public class TieredSpilloverCacheSettings {
 
     /**
      * Setting which defines the disk cache size to be used within tiered cache.
+     * This setting overrides the size setting from the disk tier implementation.
+     * For example, if EhcacheDiskCache is the disk tier in the request cache, and
+     * indices.requests.cache.ehcache_disk.max_size_in_bytes is set, that value will be ignored in favor of this setting.
      */
     public static final Setting.AffixSetting<Long> TIERED_SPILLOVER_DISK_STORE_SIZE = Setting.suffixKeySetting(
         TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.size",
diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java
index fcddd489a27aa..78302cede402f 100644
--- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java
+++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java
@@ -128,6 +128,10 @@ public void close() {
 
     }
 
+    long getMaximumWeight() {
+        return maxSize;
+    }
+
     public static class MockDiskCacheFactory implements Factory {
 
         public static final String NAME = "mockDiskCache";
diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
index 3bb1321f9faf2..494534ac74c9f 100644
--- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
+++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
@@ -58,6 +58,7 @@
 
 import static org.opensearch.cache.common.tier.TieredSpilloverCache.ZERO_SEGMENT_COUNT_EXCEPTION_MESSAGE;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP;
+import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.MIN_DISK_CACHE_SIZE_IN_BYTES;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP;
@@ -2166,6 +2167,134 @@ public void testDropStatsForDimensions() throws Exception {
         assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.stats().getTotalStats());
     }
 
+    public void testSegmentSizesWhenUsingFactory() {
+        // The TSC's tier size settings, TIERED_SPILLOVER_ONHEAP_STORE_SIZE and TIERED_SPILLOVER_DISK_STORE_SIZE,
+        // should always be respected, overriding the individual implementation's size settings if present
+        long expectedHeapSize = 256L * between(10, 20);
+        long expectedDiskSize = MIN_DISK_CACHE_SIZE_IN_BYTES + 256L * between(30, 40);
+        long heapSizeFromImplSetting = 50;
+        int diskSizeFromImplSetting = 50;
+        int numSegments = getNumberOfSegments();
+
+        int keyValueSize = 1;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        Settings settings = Settings.builder()
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+            )
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            )
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                MockDiskCache.MockDiskCacheFactory.NAME
+            )
+            // These two size settings should be honored
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                expectedHeapSize + "b"
+            )
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_SIZE.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                expectedDiskSize
+            )
+            // The size setting from the OpenSearchOnHeap implementation should not be honored
+            .put(
+                OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                heapSizeFromImplSetting + "b"
+            )
+            .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+            .put(
+                TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(),
+                numSegments
+            )
+            .build();
+        String storagePath = getStoragePath(settings);
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = (TieredSpilloverCache<
+            String,
+            String>) new TieredSpilloverCache.TieredSpilloverCacheFactory().create(
+                new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                    .setKeyType(String.class)
+                    .setWeigher((k, v) -> keyValueSize)
+                    .setRemovalListener(removalListener)
+                    .setKeySerializer(new StringSerializer())
+                    .setValueSerializer(new StringSerializer())
+                    .setSettings(settings)
+                    .setDimensionNames(dimensionNames)
+                    .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken
+                    // 20_000_000 ns = 20 ms to compute
+                    .setClusterSettings(clusterSettings)
+                    .setStoragePath(storagePath)
+                    .build(),
+                CacheType.INDICES_REQUEST_CACHE,
+                Map.of(
+                    OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME,
+                    new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+                    MockDiskCache.MockDiskCacheFactory.NAME,
+                    // The size value passed in here acts as the "implementation setting" for the disk tier, and should also be ignored
+                    new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, false, keyValueSize)
+                )
+            );
+        checkSegmentSizes(tieredSpilloverCache, expectedHeapSize, expectedDiskSize);
+    }
+
+    public void testSegmentSizesWhenNotUsingFactory() {
+        long expectedHeapSize = 256L * between(10, 20);
+        long expectedDiskSize = MIN_DISK_CACHE_SIZE_IN_BYTES + 256L * between(30, 40);
+        int heapSizeFromImplSetting = 50;
+        int diskSizeFromImplSetting = 50;
+
+        Settings settings = Settings.builder()
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+            )
+            .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+            // The size setting from the OpenSearchOnHeapCache implementation should not be honored
+            .put(
+                OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                heapSizeFromImplSetting + "b"
+            )
+            .build();
+
+        int keyValueSize = 1;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        int numSegments = getNumberOfSegments();
+        CacheConfig<String, String> cacheConfig = getCacheConfig(1, settings, removalListener, numSegments);
+        TieredSpilloverCache<String, String> tieredSpilloverCache = getTieredSpilloverCache(
+            new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+            new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, true, keyValueSize),
+            cacheConfig,
+            null,
+            removalListener,
+            numSegments,
+            expectedHeapSize,
+            expectedDiskSize
+        );
+        checkSegmentSizes(tieredSpilloverCache, expectedHeapSize, expectedDiskSize);
+    }
+
+    private void checkSegmentSizes(TieredSpilloverCache<String, String> cache, long expectedHeapSize, long expectedDiskSize) {
+        TieredSpilloverCache.TieredSpilloverCacheSegment<String, String> segment = cache.tieredSpilloverCacheSegments[0];
+        assertEquals(expectedHeapSize / cache.getNumberOfSegments(), segment.getOnHeapCacheMaxWeight());
+        assertEquals(expectedDiskSize / cache.getNumberOfSegments(), segment.getDiskCacheMaxWeight());
+    }
+
     private List<String> getMockDimensions() {
         List<String> dims = new ArrayList<>();
         for (String dimensionName : dimensionNames) {
@@ -2455,9 +2584,9 @@ private void verifyComputeIfAbsentThrowsException(
         MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
         Settings settings = Settings.builder()
             .put(
-                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
-                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
-                    .getKey(),
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
                 onHeapCacheSize * keyValueSize + "b"
             )
             .build();
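
For illustration only, a worked sketch of what checkSegmentSizes verifies, with assumed numbers rather than the test's random ranges: the TSC-level size settings win over the tier implementations' own size settings, and the total is split evenly across segments.

    // Illustrative arithmetic only (values assumed):
    long tscOnHeapSizeBytes = 4096;   // TIERED_SPILLOVER_ONHEAP_STORE_SIZE, honored
    long onHeapImplSizeBytes = 50;    // OpenSearchOnHeapCache size setting, ignored
    int numberOfSegments = 16;
    long perSegmentMaxWeight = tscOnHeapSizeBytes / numberOfSegments; // 256 bytes per segment
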
diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java
index cbc104f2d0b00..e4c9dd1e96c3c 100644
--- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java
+++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java
@@ -101,6 +101,7 @@ public class EhcacheDiskCacheSettings {
 
     /**
      * Disk cache max size setting.
+     * If this cache is used as a tier in a TieredSpilloverCache, this setting is ignored.
      */
     public static final Setting.AffixSetting<Long> DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting(
         EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes",
diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java
index 0fa0f8162bb98..33c27eb301ad1 100644
--- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java
+++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java
@@ -680,6 +680,11 @@ private V deserializeValue(ByteArrayWrapper binary) {
         return valueSerializer.deserialize(binary.value);
     }
 
+    // Pkg-private for testing.
+    long getMaximumWeight() {
+        return maxWeightInBytes;
+    }
+
     /**
      * Factory to create an ehcache disk cache.
      */
diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java
index a0d0aa4ec4914..4e879af052c15 100644
--- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java
+++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java
@@ -20,11 +20,13 @@
 import org.opensearch.common.cache.RemovalNotification;
 import org.opensearch.common.cache.serializer.BytesReferenceSerializer;
 import org.opensearch.common.cache.serializer.Serializer;
+import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
 import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.metrics.CounterMetric;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.bytes.BytesReference;
@@ -1201,6 +1203,65 @@ public void testEhcacheCloseWithDestroyCacheMethodThrowingException() throws Exc
         ehcacheDiskCache.close();
     }
 
+    public void testWithCacheConfigSizeSettings() throws Exception {
+        // The cache should get its size from the config if present, and otherwise should get it from the setting.
+        long maxSizeFromSetting = between(MINIMUM_MAX_SIZE_IN_BYTES + 1000, MINIMUM_MAX_SIZE_IN_BYTES + 2000);
+        long maxSizeFromConfig = between(MINIMUM_MAX_SIZE_IN_BYTES + 3000, MINIMUM_MAX_SIZE_IN_BYTES + 4000);
+
+        EhcacheDiskCache<String, String> cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, false);
+        assertEquals(maxSizeFromSetting, cache.getMaximumWeight());
+
+        cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, cache.getMaximumWeight());
+    }
+
+    // Modified from OpenSearchOnHeapCacheTests. Can't reuse, as we can't add a dependency on the server.test module.
+    private EhcacheDiskCache<String, String> setupMaxSizeTest(long maxSizeFromSetting, long maxSizeFromConfig, boolean putSizeInConfig)
+        throws Exception {
+        MockRemovalListener<String, String> listener = new MockRemovalListener<>();
+        try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) {
+            Settings settings = Settings.builder()
+                .put(FeatureFlags.PLUGGABLE_CACHE, true)
+                .put(
+                    CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                    EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME
+                )
+                .put(
+                    EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(DISK_MAX_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    maxSizeFromSetting
+                )
+                .put(
+                    EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(DISK_STORAGE_PATH_KEY)
+                        .getKey(),
+                    env.nodePaths()[0].indicesPath.toString() + "/request_cache/" + 0
+                )
+                .build();
+
+            CacheConfig.Builder<String, String> cacheConfigBuilder = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                .setValueType(String.class)
+                .setKeySerializer(new StringSerializer())
+                .setValueSerializer(new StringSerializer())
+                .setWeigher(getWeigher())
+                .setRemovalListener(listener)
+                .setSettings(settings)
+                .setDimensionNames(List.of(dimensionName))
+                .setStatsTrackingEnabled(true);
+            if (putSizeInConfig) {
+                cacheConfigBuilder.setMaxSizeInBytes(maxSizeFromConfig);
+            }
+
+            ICache.Factory cacheFactory = new EhcacheDiskCache.EhcacheDiskCacheFactory();
+            return (EhcacheDiskCache<String, String>) cacheFactory.create(
+                cacheConfigBuilder.build(),
+                CacheType.INDICES_REQUEST_CACHE,
+                null
+            );
+        }
+    }
+
     static class MockEhcahceDiskCache extends EhcacheDiskCache<String, String> {
 
         public MockEhcahceDiskCache(Builder<String, String> builder) {
diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
index 01da78ecec52e..da006264094d2 100644
--- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
+++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
@@ -46,11 +46,8 @@ public CacheService(Map<String, ICache.Factory> cacheStoreTypeFactories, Setting
     }
 
     public <K, V> ICache<K, V> createCache(CacheConfig<K, V> config, CacheType cacheType) {
-        Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(
-            cacheType.getSettingPrefix()
-        );
-        String storeName = cacheSettingForCacheType.get(settings);
-        if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) {
+        String storeName = getStoreNameFromSetting(cacheType, settings);
+        if (!pluggableCachingEnabled(cacheType, settings)) {
             // Condition 1: In case feature flag is off, we default to onHeap.
             // Condition 2: In case storeName is not explicitly mentioned, we assume user is looking to use older
             // settings, so we again fallback to onHeap to maintain backward compatibility.
@@ -74,4 +71,19 @@ public NodeCacheStats stats(CommonStatsFlags flags) {
         }
         return new NodeCacheStats(statsMap, flags);
     }
+
+    /**
+     * Check if pluggable caching is on, and if a store type is present for this cache type.
+     */
+    public static boolean pluggableCachingEnabled(CacheType cacheType, Settings settings) {
+        String storeName = getStoreNameFromSetting(cacheType, settings);
+        return FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) && storeName != null && !storeName.isBlank();
+    }
+
+    private static String getStoreNameFromSetting(CacheType cacheType, Settings settings) {
+        Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(
+            cacheType.getSettingPrefix()
+        );
+        return cacheSettingForCacheType.get(settings);
+    }
 }
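
For illustration only, a minimal sketch of the new helper in use; the concrete store-name key below is an assumption derived from CacheSettings.CACHE_TYPE_STORE_NAME and the request-cache prefix:

    // Pluggable caching counts as enabled for a cache type only when the feature flag is on
    // AND a concrete store name is configured for that type.
    Settings settings = Settings.builder()
        .put(FeatureFlags.PLUGGABLE_CACHE, true)
        .put("indices.requests.cache.store.name", "opensearch_onheap") // assumed concrete key
        .build();
    boolean pluggable = CacheService.pluggableCachingEnabled(CacheType.INDICES_REQUEST_CACHE, settings);
    // pluggable == true; with the flag off or the store name blank it would be false.
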
diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
index 571383a9fce6a..e1039c5d9ee55 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
@@ -17,6 +17,7 @@
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.RemovalNotification;
 import org.opensearch.common.cache.RemovalReason;
+import org.opensearch.common.cache.service.CacheService;
 import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.CacheStatsHolder;
 import org.opensearch.common.cache.stats.DefaultCacheStatsHolder;
@@ -80,7 +81,7 @@ public OpenSearchOnHeapCache(Builder<K, V> builder) {
         this.weigher = builder.getWeigher();
     }
 
-    // package private for testing
+    // pkg-private for testing
     long getMaximumWeight() {
         return this.maximumWeight;
     }
@@ -192,8 +193,12 @@ public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType,
             );
             long maxSizeInBytes = ((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes();
 
-            if (config.getMaxSizeInBytes() > 0) { // If this is passed from upstream(like tieredCache), then use this
-                // instead.
+            if (config.getMaxSizeInBytes() > 0) {
+                /*
+                Use the cache config value if present.
+                This can be passed down from the TieredSpilloverCache when creating individual segments,
+                but is not passed in from the IRC if pluggable caching is on.
+                 */
                 builder.setMaximumWeightInBytes(config.getMaxSizeInBytes());
             } else {
                 builder.setMaximumWeightInBytes(maxSizeInBytes);
@@ -204,8 +209,7 @@ public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType,
                 builder.setNumberOfSegments(-1); // By default it will use 256 segments.
             }
 
-            String storeName = cacheSettingForCacheType.get(settings);
-            if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) {
+            if (!CacheService.pluggableCachingEnabled(cacheType, settings)) {
                 // For backward compatibility as the user intent is to use older settings.
                 builder.setMaximumWeightInBytes(config.getMaxSizeInBytes());
                 builder.setExpireAfterAccess(config.getExpireAfterAccess());
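
For illustration only, a compact sketch summarizing the size-resolution order the factory now follows; identifiers come from the hunk above, but the ternary is a paraphrase of the branches, not the committed code.

    boolean pluggableEnabled = CacheService.pluggableCachingEnabled(cacheType, settings);
    long configSize = config.getMaxSizeInBytes();   // > 0 when the TSC passes a per-segment size
    long settingSize = maxSizeInBytes;              // from the opensearch_onheap size setting
    long effective = !pluggableEnabled ? configSize // legacy path: honor the config for BWC
                   : configSize > 0    ? configSize // pluggable caching + explicit config wins
                                       : settingSize; // otherwise fall back to the setting
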
diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
index 5a2964ad011bf..8ba356f9e0597 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
@@ -26,6 +26,7 @@ public class OpenSearchOnHeapCacheSettings {
 
     /**
      * Setting to define maximum size for the cache as a percentage of heap memory available.
+     * If this cache is used as a tier in a TieredSpilloverCache, this setting is ignored.
      *
      * Setting pattern: {cache_type}.opensearch_onheap.size
      */
diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
index 3d158cb60a208..4f42cd8fe8672 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
@@ -124,10 +124,18 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
         Property.Dynamic,
         Property.IndexScope
     );
+
+    /**
+     * If pluggable caching is off, or pluggable caching is on but a store name isn't specified, this setting determines the cache size.
+     * Otherwise, the implementation-specific size setting like indices.requests.cache.opensearch_onheap.size is used instead.
+     *
+     * Deprecated; once pluggable caching is no longer behind a feature flag (likely in 2.19), this setting will no longer have any effect.
+     */
     public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.memorySizeSetting(
         "indices.requests.cache.size",
         "1%",
-        Property.NodeScope
+        Property.NodeScope,
+        Property.Deprecated
     );
     public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting(
         "indices.requests.cache.expire",
@@ -166,7 +174,6 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
     private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class);
 
     private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
-    private final ByteSizeValue size;
     private final TimeValue expire;
     private final ICache<Key, BytesReference> cache;
     private final ClusterService clusterService;
@@ -187,10 +194,7 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
         ClusterService clusterService,
         NodeEnvironment nodeEnvironment
     ) {
-        this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
         this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
-        long sizeInBytes = size.getBytes();
-        ToLongBiFunction<ICacheKey<Key>, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed();
         this.cacheCleanupManager = new IndicesRequestCacheCleanupManager(
             threadPool,
             INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING.get(settings),
@@ -200,30 +204,42 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
         this.clusterService = clusterService;
         this.clusterService.getClusterSettings()
             .addSettingsUpdateConsumer(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING, this::setStalenessThreshold);
-        this.cache = cacheService.createCache(
-            new CacheConfig.Builder<Key, BytesReference>().setSettings(settings)
-                .setWeigher(weigher)
-                .setValueType(BytesReference.class)
-                .setKeyType(Key.class)
-                .setRemovalListener(this)
-                .setMaxSizeInBytes(sizeInBytes) // for backward compatibility
-                .setExpireAfterAccess(expire) // for backward compatibility
-                .setDimensionNames(List.of(INDEX_DIMENSION_NAME, SHARD_ID_DIMENSION_NAME))
-                .setCachedResultParser((bytesReference) -> {
-                    try {
-                        return CachedQueryResult.getPolicyValues(bytesReference);
-                    } catch (IOException e) {
-                        // Set took time to -1, which will always be rejected by the policy.
-                        return new CachedQueryResult.PolicyValues(-1);
-                    }
-                })
-                .setKeySerializer(new IRCKeyWriteableSerializer())
-                .setValueSerializer(new BytesReferenceSerializer())
-                .setClusterSettings(clusterService.getClusterSettings())
-                .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache")
-                .build(),
-            CacheType.INDICES_REQUEST_CACHE
-        );
+
+        CacheConfig<Key, BytesReference> config = getCacheConfig(settings, nodeEnvironment);
+        this.cache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE);
+    }
+
+    // pkg-private for testing
+    CacheConfig<Key, BytesReference> getCacheConfig(Settings settings, NodeEnvironment nodeEnvironment) {
+        long sizeInBytes = INDICES_CACHE_QUERY_SIZE.get(settings).getBytes();
+        ToLongBiFunction<ICacheKey<Key>, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed();
+        CacheConfig.Builder<Key, BytesReference> configBuilder = new CacheConfig.Builder<Key, BytesReference>().setSettings(settings)
+            .setWeigher(weigher)
+            .setValueType(BytesReference.class)
+            .setKeyType(Key.class)
+            .setRemovalListener(this)
+            .setExpireAfterAccess(expire) // for backward compatibility
+            .setDimensionNames(List.of(INDEX_DIMENSION_NAME, SHARD_ID_DIMENSION_NAME))
+            .setCachedResultParser((bytesReference) -> {
+                try {
+                    return CachedQueryResult.getPolicyValues(bytesReference);
+                } catch (IOException e) {
+                    // Set took time to -1, which will always be rejected by the policy.
+                    return new CachedQueryResult.PolicyValues(-1);
+                }
+            })
+            .setKeySerializer(new IRCKeyWriteableSerializer())
+            .setValueSerializer(new BytesReferenceSerializer())
+            .setClusterSettings(clusterService.getClusterSettings())
+            .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache");
+
+        if (!CacheService.pluggableCachingEnabled(CacheType.INDICES_REQUEST_CACHE, settings)) {
+            // If pluggable caching is not enabled, put the max size from the IRC setting into the config.
+            // If pluggable caching is enabled, cache implementations instead determine their own sizes from their own
+            // implementation-specific size settings.
+            configBuilder.setMaxSizeInBytes(sizeInBytes);
+        }
+        return configBuilder.build();
     }
 
     // package private for testing
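To summarize the sizing precedence introduced in this patch: when pluggable caching is off (the feature flag is disabled, or no store name is configured for the cache type), the IRC copies its legacy indices.requests.cache.size value into the CacheConfig and the on-heap cache honors that value; when pluggable caching is on, a size already present in the config (for example one set by a TieredSpilloverCache for its segments) wins, and otherwise the implementation-specific size setting applies. A minimal, self-contained sketch of that decision follows; PluggableCacheSizing and resolveMaxWeightBytes are illustrative stand-ins, not OpenSearch classes.

    // Illustrative sketch of the precedence described above; not OpenSearch code.
    public final class PluggableCacheSizing {

        /**
         * @param pluggableCachingEnabled true only if the feature flag is on and a store name is configured
         * @param configMaxSizeBytes      max size carried in the CacheConfig (0 means "not set")
         * @param settingMaxSizeBytes     value of the implementation-specific size setting
         * @param legacyIrcSizeBytes      value of the legacy indices.requests.cache.size setting
         */
        static long resolveMaxWeightBytes(
            boolean pluggableCachingEnabled,
            long configMaxSizeBytes,
            long settingMaxSizeBytes,
            long legacyIrcSizeBytes
        ) {
            if (pluggableCachingEnabled == false) {
                // Backward compatibility: the IRC puts its legacy size into the config, and that value is used.
                return legacyIrcSizeBytes;
            }
            // Pluggable caching on: a config-supplied size (e.g. from a tiered cache segment) wins,
            // otherwise fall back to the implementation's own size setting.
            return configMaxSizeBytes > 0 ? configMaxSizeBytes : settingMaxSizeBytes;
        }

        public static void main(String[] args) {
            System.out.println(resolveMaxWeightBytes(false, 0, 2_000, 1_000));    // 1000
            System.out.println(resolveMaxWeightBytes(true, 3_000, 2_000, 1_000)); // 3000
            System.out.println(resolveMaxWeightBytes(true, 0, 2_000, 1_000));     // 2000
        }
    }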
diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
index 45a7b273eb41e..5a989ad8ab777 100644
--- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
@@ -15,6 +15,7 @@
 import org.opensearch.common.cache.LoadAwareCacheLoader;
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.RemovalNotification;
+import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
 import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
 import org.opensearch.common.cache.store.config.CacheConfig;
@@ -105,35 +106,69 @@ public void testStatsWithoutPluggableCaches() throws Exception {
         }
     }
 
-    public void testWithCacheConfigSettings() {
-        MockRemovalListener<String, String> listener = new MockRemovalListener<>();
-        int maxKeys = between(10, 50);
-        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
-        Settings settings = Settings.builder()
-            .put(
-                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
-                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
-                    .getKey(),
-                1000 + "b" // Setting some random value which shouldn't be honored.
-            )
+    public void testWithCacheConfigSizeSettings_WhenPluggableCachingOff() {
+        // The "pluggable caching off" case can happen when the PLUGGABLE_CACHE setting is false, or if the store name is blank.
+        // The cache should get its size from the config, not the setting, in either case.
+        Settings.Builder settingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, false);
+        long maxSizeFromSetting = between(1000, 2000);
+        long maxSizeFromConfig = between(3000, 4000);
+        OpenSearchOnHeapCache<String, String> onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight());
+
+        Settings.Builder storeNameBlankSettingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, true);
+        onHeapCache = setupMaxSizeTest(storeNameBlankSettingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight());
+    }
+
+    public void testWithCacheConfigSettings_WhenPluggableCachingOn() {
+        // When pluggable caching is on, the cache should get its size from the config if present, and otherwise should get it from the
+        // setting.
+        Settings.Builder settingsBuilder = Settings.builder()
             .put(FeatureFlags.PLUGGABLE_CACHE, true)
-            .build();
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            );
+        long maxSizeFromSetting = between(1000, 2000);
+        long maxSizeFromConfig = between(3000, 4000);
+        OpenSearchOnHeapCache<String, String> onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, false);
+        assertEquals(maxSizeFromSetting, onHeapCache.getMaximumWeight());
+
+        onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true);
+        assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight());
+    }
 
-        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+    private OpenSearchOnHeapCache<String, String> setupMaxSizeTest(
+        Settings.Builder settingsBuilder,
+        long maxSizeFromSetting,
+        long maxSizeFromConfig,
+        boolean putSizeInConfig
+    ) {
+        MockRemovalListener<String, String> listener = new MockRemovalListener<>();
+        settingsBuilder.put(
+            OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                .getKey(),
+            maxSizeFromSetting + "b"
+        );
+
+        CacheConfig.Builder<String, String> cacheConfigBuilder = new CacheConfig.Builder<String, String>().setKeyType(String.class)
             .setValueType(String.class)
             .setWeigher((k, v) -> keyValueSize)
             .setRemovalListener(listener)
-            .setSettings(settings)
+            .setSettings(settingsBuilder.build())
             .setDimensionNames(dimensionNames)
-            .setMaxSizeInBytes(maxKeys * keyValueSize) // this should get honored
-            .setStatsTrackingEnabled(true)
-            .build();
-        OpenSearchOnHeapCache<String, String> onHeapCache = (OpenSearchOnHeapCache<String, String>) onHeapCacheFactory.create(
-            cacheConfig,
+            .setStatsTrackingEnabled(true);
+        if (putSizeInConfig) {
+            cacheConfigBuilder.setMaxSizeInBytes(maxSizeFromConfig);
+        }
+
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        return (OpenSearchOnHeapCache<String, String>) onHeapCacheFactory.create(
+            cacheConfigBuilder.build(),
             CacheType.INDICES_REQUEST_CACHE,
             null
         );
-        assertEquals(maxKeys * keyValueSize, onHeapCache.getMaximumWeight());
     }
 
     private void assertZeroStats(ImmutableCacheStatsHolder stats) {
diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
index 78782112be844..c90924cfc0fd1 100644
--- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
+++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
@@ -81,6 +81,9 @@ public void testIndicesRequestCacheSetting() {
             "indices.requests.cache.size",
             new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.01))
         );
+        assertWarnings(
+            "[indices.requests.cache.size] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version."
+        );
     }
 
     public void testCircuitBreakerSettings() {
diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
index 1a3aece74b3e2..e83ca247b6a1d 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
@@ -53,12 +53,16 @@
 import org.opensearch.cluster.routing.ShardRoutingHelper;
 import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.common.CheckedSupplier;
+import org.opensearch.common.cache.CacheType;
 import org.opensearch.common.cache.ICacheKey;
 import org.opensearch.common.cache.RemovalNotification;
 import org.opensearch.common.cache.RemovalReason;
 import org.opensearch.common.cache.module.CacheModule;
+import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
 import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
+import org.opensearch.common.cache.store.OpenSearchOnHeapCache;
+import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
 import org.opensearch.common.settings.Settings;
@@ -852,6 +856,42 @@ public void testAddingToCleanupKeyToCountMapWorksAppropriatelyWithMultipleThread
         assertFalse(concurrentModificationExceptionDetected.get());
     }
 
+    public void testCacheMaxSize_WhenPluggableCachingOff() throws Exception {
+        // If pluggable caching is off, the IRC should put a max size value into the cache config that it uses to create its cache.
+        threadPool = getThreadPool();
+        long cacheSize = 1000;
+        Settings settings = Settings.builder().put(INDICES_CACHE_QUERY_SIZE.getKey(), cacheSize + "b").build();
+        cache = getIndicesRequestCache(settings);
+        CacheConfig<IndicesRequestCache.Key, BytesReference> config;
+        try (NodeEnvironment env = newNodeEnvironment(settings)) {
+            // For the purposes of this test it doesn't matter if the node environment matches the one used in the constructor
+            config = cache.getCacheConfig(settings, env);
+        }
+        assertEquals(cacheSize, (long) config.getMaxSizeInBytes());
+        allowDeprecationWarning();
+    }
+
+    public void testCacheMaxSize_WhenPluggableCachingOn() throws Exception {
+        // If pluggable caching is on, and a store name is present, the IRC should NOT put a max size value into the cache config.
+        threadPool = getThreadPool();
+        Settings settings = Settings.builder()
+            .put(INDICES_CACHE_QUERY_SIZE.getKey(), 1000 + "b")
+            .put(FeatureFlags.PLUGGABLE_CACHE, true)
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            )
+            .build();
+        cache = getIndicesRequestCache(settings);
+        CacheConfig<IndicesRequestCache.Key, BytesReference> config;
+        try (NodeEnvironment env = newNodeEnvironment(settings)) {
+            // For the purposes of this test it doesn't matter if the node environment matches the one used in the constructor
+            config = cache.getCacheConfig(settings, env);
+        }
+        assertEquals(0, (long) config.getMaxSizeInBytes());
+        allowDeprecationWarning();
+    }
+
     private IndicesRequestCache getIndicesRequestCache(Settings settings) throws IOException {
         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         try (NodeEnvironment env = newNodeEnvironment(settings)) {
@@ -1095,6 +1135,7 @@ public void testEviction() throws Exception {
         assertEquals(2, cache.count());
         assertEquals(1, indexShard.requestCache().stats().getEvictions());
         IOUtils.close(reader, secondReader, thirdReader, environment);
+        allowDeprecationWarning();
     }
 
     public void testClearAllEntityIdentity() throws Exception {
@@ -1372,6 +1413,7 @@ public void testGetOrComputeConcurrentlyWithMultipleIndices() throws Exception {
         }
         IOUtils.close(cache);
         executorService.shutdownNow();
+        allowDeprecationWarning();
     }
 
     public void testDeleteAndCreateIndexShardOnSameNodeAndVerifyStats() throws Exception {
@@ -1540,6 +1582,12 @@ public static String generateString(int length) {
         return sb.toString();
     }
 
+    private void allowDeprecationWarning() {
+        assertWarnings(
+            "[indices.requests.cache.size] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version."
+        );
+    }
+
     private class TestBytesReference extends AbstractBytesReference {
 
         int dummyValue;

From abb81120ab4cc9b61e91f5b57a797409a951e3d7 Mon Sep 17 00:00:00 2001
From: Karen X <karenxyr@gmail.com>
Date: Thu, 16 Jan 2025 15:23:29 -0500
Subject: [PATCH 51/61] Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT
 settings and remove lingering HTTP terminology (#17037)

* [GRPC] Rename AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology

Signed-off-by: Karen Xu <karenxyr@gmail.com>

* Update CHANGELOG

Signed-off-by: Karen Xu <karenxyr@gmail.com>

* remove extra space in CHANGELOG

Signed-off-by: Karen Xu <karenxyr@gmail.com>

* Update plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java

Co-authored-by: Andriy Redko <drreta@gmail.com>
Signed-off-by: Karen X <karenxyr@gmail.com>

---------

Signed-off-by: Karen Xu <karenxyr@gmail.com>
Signed-off-by: Karen X <karenxyr@gmail.com>
Co-authored-by: Andriy Redko <drreta@gmail.com>
---
 CHANGELOG.md                                    |  1 +
 .../opensearch/transport/grpc/GrpcPlugin.java   |  4 ++--
 .../grpc/Netty4GrpcServerTransport.java         | 17 +++++++++--------
 .../grpc/Netty4GrpcServerTransportTests.java    |  2 +-
 .../java/org/opensearch/bootstrap/Security.java |  6 +++---
 .../org/opensearch/plugins/NetworkPlugin.java   |  4 ++--
 6 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 21cdf30867e74..9d9b9e54c3640 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -100,6 +100,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868))
 - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732))
 - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993))
+- Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037))
 
 ### Security
 
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
index 0a464e135350b..7f02983010f98 100644
--- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java
@@ -25,7 +25,7 @@
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST;
-import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT;
 import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT;
@@ -58,7 +58,7 @@ public Map<String, Supplier<AuxTransport>> getAuxTransports(
     @Override
     public List<Setting<?>> getSettings() {
         return List.of(
-            SETTING_GRPC_PORTS,
+            SETTING_GRPC_PORT,
             SETTING_GRPC_HOST,
             SETTING_GRPC_PUBLISH_HOST,
             SETTING_GRPC_BIND_HOST,
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
index 61c0722772b92..1fb6a0bca03ea 100644
--- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
@@ -63,9 +63,9 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
 
     /**
      * Port range on which to bind.
-     * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORTS where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
+     * Note this setting is configured through the AffixSetting AUX_TRANSPORT_PORT, with the aux transport type GRPC_TRANSPORT_SETTING_KEY as the namespace.
      */
-    public static final Setting<PortsRange> SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(
+    public static final Setting<PortsRange> SETTING_GRPC_PORT = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(
         GRPC_TRANSPORT_SETTING_KEY
     );
 
@@ -134,20 +134,21 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
      * @param networkService the bind/publish addresses.
      */
     public Netty4GrpcServerTransport(Settings settings, List<BindableService> services, NetworkService networkService) {
+        logger.debug("Initializing Netty4GrpcServerTransport with settings = {}", settings);
         this.settings = Objects.requireNonNull(settings);
         this.services = Objects.requireNonNull(services);
         this.networkService = Objects.requireNonNull(networkService);
 
-        final List<String> httpBindHost = SETTING_GRPC_BIND_HOST.get(settings);
-        this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray(
+        final List<String> grpcBindHost = SETTING_GRPC_BIND_HOST.get(settings);
+        this.bindHosts = (grpcBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : grpcBindHost).toArray(
             Strings.EMPTY_ARRAY
         );
 
-        final List<String> httpPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
-        this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
+        final List<String> grpcPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
+        this.publishHosts = (grpcPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : grpcPublishHost)
             .toArray(Strings.EMPTY_ARRAY);
 
-        this.port = SETTING_GRPC_PORTS.get(settings);
+        this.port = SETTING_GRPC_PORT.get(settings);
         this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings);
     }
 
@@ -229,7 +230,7 @@ private void bindServer() {
                     + publishInetAddress
                     + "). "
                     + "Please specify a unique port by setting "
-                    + SETTING_GRPC_PORTS.getKey()
+                    + SETTING_GRPC_PORT.getKey()
                     + " or "
                     + SETTING_GRPC_PUBLISH_PORT.getKey()
             );
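With this rename, a port range for an auxiliary transport is configured under a key of the form aux.transport.<type>.port rather than aux.transport.<type>.ports, where <type> is the transport's registered key (GRPC_TRANSPORT_SETTING_KEY for this plugin), and AUX_PORT_DEFAULTS (9400-9500) still applies when the setting is absent. The sketch below only mirrors the prefix/suffix composition of the concrete key, not the real Setting.AffixSetting API, and the "my-aux-transport" namespace is made up for illustration.

    // Simplified illustration of the affix-key composition; not the real Setting.AffixSetting API.
    public final class AuxPortKeyExample {
        static final String AUX_SETTINGS_PREFIX = "aux.transport.";

        static String concretePortKey(String auxTransportType) {
            // For the gRPC transport this corresponds to AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(GRPC_TRANSPORT_SETTING_KEY).
            return AUX_SETTINGS_PREFIX + auxTransportType + ".port";
        }

        public static void main(String[] args) {
            System.out.println(concretePortKey("my-aux-transport")); // aux.transport.my-aux-transport.port
        }
    }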
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
index ebeff62c2c23c..8cf44eebb293e 100644
--- a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
@@ -44,6 +44,6 @@ public void test() {
     }
 
     private static Settings createSettings() {
-        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build();
+        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), getPortRange()).build();
     }
 }
diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java
index 9f1dcbe8fb587..563a026109059 100644
--- a/server/src/main/java/org/opensearch/bootstrap/Security.java
+++ b/server/src/main/java/org/opensearch/bootstrap/Security.java
@@ -74,7 +74,7 @@
 import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath;
 import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
 import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS;
-import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORT;
 import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING;
 
 /**
@@ -423,7 +423,7 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S
     }
 
     /**
-     * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORTS.
+     * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORT.
      * If an auxiliary transport type is enabled but has no corresponding port range setting fall back to AUX_PORT_DEFAULTS.
      *
      * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
@@ -432,7 +432,7 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S
     private static void addSocketPermissionForAux(final Permissions policy, final Settings settings) {
         Set<PortsRange> portsRanges = new HashSet<>();
         for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) {
-            Setting<PortsRange> auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType);
+            Setting<PortsRange> auxTypePortSettings = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(auxType);
             if (auxTypePortSettings.exists(settings)) {
                 portsRanges.add(auxTypePortSettings.get(settings));
             } else {
diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
index 516aa94534f94..4442189373c93 100644
--- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
@@ -79,9 +79,9 @@ abstract class AuxTransport extends AbstractLifecycleComponent {
         public static final String AUX_SETTINGS_PREFIX = "aux.transport.";
         public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types";
         public static final String AUX_PORT_DEFAULTS = "9400-9500";
-        public static final Setting.AffixSetting<PortsRange> AUX_TRANSPORT_PORTS = affixKeySetting(
+        public static final Setting.AffixSetting<PortsRange> AUX_TRANSPORT_PORT = affixKeySetting(
             AUX_SETTINGS_PREFIX,
-            "ports",
+            "port",
             key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope)
         );
 

From fe1f0d814230413a3589204376d665e360da4b96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=EC=A1=B0=ED=98=9C=EC=98=A8?=
 <68319395+hye-on@users.noreply.github.com>
Date: Fri, 17 Jan 2025 21:33:46 +0900
Subject: [PATCH 52/61] Fix getTime field name to time in GetStats (#16894)
 (#17009)

* Fix getTime field name to time in GetStats (#16894)

Signed-off-by: hye-on <ain0103@naver.com>

* Update PR number in changelog

Signed-off-by: hye-on <ain0103@naver.com>

* Deprecate getTime field and add time field in GetStats for backward compatibility

Signed-off-by: hye-on <ain0103@naver.com>

* Add forRemoval flag to getTime field for future removal

Signed-off-by: hye-on <ain0103@naver.com>

* Changed to use field instead of humanReadableField for GET_TIME in JSON response

Replaced the use of builder.humanReadableField for the GET_TIME field with builder.field(Fields.GET_TIME, Objects.toString(getTime())). This prevents the duplication of the time_in_millis field.

Signed-off-by: hye-on <ain0103@naver.com>

* Add test to validate getTime and time fields in _stats API response

The getTime and time fields are verified to be present in the _stats API response with matching values.

Signed-off-by: hye-on <ain0103@naver.com>

* Fix formatting in GetStats.java

Signed-off-by: hye-on <ain0103@naver.com>

* Rename test file to better reflect test purpose

Signed-off-by: hye-on <ain0103@naver.com>

* Add a version skip for the stats API human filter test on versions under 2.19.99

Signed-off-by: hye-on <ain0103@naver.com>

* Remove unnecessary changelog entries

Signed-off-by: hye-on <ain0103@naver.com>

* Add a line for styling purposes

Signed-off-by: hye-on <ain0103@naver.com>

---------

Signed-off-by: hye-on <ain0103@naver.com>
---
 CHANGELOG.md                                  |  2 ++
 ...include_both_time_and_gettime_in_stats.yml | 36 +++++++++++++++++++
 .../org/opensearch/index/get/GetStats.java    |  9 ++++-
 3 files changed, 46 insertions(+), 1 deletion(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d9b9e54c3640..c9d7d9a60a3e5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/))
 - Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/))
 - Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/))
+- Added a new `time` field to replace the deprecated `getTime` field in `GetStats`. ([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
@@ -75,6 +76,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 
 ### Deprecated
 - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712))
+- Marked `getTime` field as deprecated in favor of the new `time` field. ([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009))
 
 ### Removed
 
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml
new file mode 100644
index 0000000000000..d5e3e7554b400
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml
@@ -0,0 +1,36 @@
+---
+setup:
+  - do:
+      indices.create:
+        index: test1
+        body:
+          settings:
+            number_of_shards: 1
+            number_of_replicas: 0
+        wait_for_active_shards: all
+
+  - do:
+      index:
+        index: test1
+        id: 1
+        body: { "foo": "bar" }
+
+  - do:
+      indices.refresh:
+        index: test1
+
+---
+"Test _stats API includes both time and getTime metrics with human filter":
+  - skip:
+      version: " - 2.19.99"
+      reason: "this change is added in 3.0.0"
+
+  - do:
+      indices.stats:
+        metric: [ get ]
+        human: true
+
+  - is_true: _all.primaries.get.time
+  - is_true: _all.primaries.get.getTime
+  - match: { _all.primaries.get.time: "0s" }
+  - match: { _all.primaries.get.getTime: "0s" }
diff --git a/server/src/main/java/org/opensearch/index/get/GetStats.java b/server/src/main/java/org/opensearch/index/get/GetStats.java
index a366014fe228e..55f14294d774b 100644
--- a/server/src/main/java/org/opensearch/index/get/GetStats.java
+++ b/server/src/main/java/org/opensearch/index/get/GetStats.java
@@ -41,6 +41,7 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 
 import java.io.IOException;
+import java.util.Objects;
 
 /**
  * Stats for a search get
@@ -137,6 +138,7 @@ public long current() {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(Fields.GET);
         builder.field(Fields.TOTAL, getCount());
+        builder.field(Fields.GET_TIME, Objects.toString(getTime()));
         builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, getTime());
         builder.field(Fields.EXISTS_TOTAL, existsCount);
         builder.humanReadableField(Fields.EXISTS_TIME_IN_MILLIS, Fields.EXISTS_TIME, getExistsTime());
@@ -155,7 +157,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     static final class Fields {
         static final String GET = "get";
         static final String TOTAL = "total";
-        static final String TIME = "getTime";
+        /**
+         * Deprecated field name for time. Use {@link #TIME} instead.
+         */
+        @Deprecated(forRemoval = true)
+        static final String GET_TIME = "getTime";
+        static final String TIME = "time";
         static final String TIME_IN_MILLIS = "time_in_millis";
         static final String EXISTS_TOTAL = "exists_total";
         static final String EXISTS_TIME = "exists_time";
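With this change the get section of the _stats response keeps the deprecated getTime field alongside the new time / time_in_millis pair, which is what the YAML test above asserts (both human-readable fields render as "0s" for a zero TimeValue). The sketch below only illustrates the resulting response shape using a plain Map; it is not the XContentBuilder API, and the note that the human-readable time field appears only when human=true is an assumption based on humanReadableField's usual behavior.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative shape of the backward-compatible "get" stats section (human=true).
    public final class GetStatsShapeExample {
        public static void main(String[] args) {
            Map<String, Object> get = new LinkedHashMap<>();
            get.put("total", 0);
            get.put("getTime", "0s");      // deprecated field, kept for backward compatibility
            get.put("time", "0s");         // new human-readable field
            get.put("time_in_millis", 0L); // raw value, emitted once
            System.out.println(get);       // {total=0, getTime=0s, time=0s, time_in_millis=0}
        }
    }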

From a72e95a810be51bbe883aba9507093deb2f14a12 Mon Sep 17 00:00:00 2001
From: Daniel Widdis <widdis@gmail.com>
Date: Fri, 17 Jan 2025 15:54:55 -0800
Subject: [PATCH 53/61] Add Craig Perkins as OpenSearch Maintainer (#17046)

Signed-off-by: Daniel Widdis <widdis@gmail.com>
---
 MAINTAINERS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 4a8aa9305df74..93821a3da4c71 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -13,6 +13,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 | Ashish Singh             | [ashking94](https://github.com/ashking94)               | Amazon      |
 | Bukhtawar Khan           | [Bukhtawar](https://github.com/Bukhtawar)               | Amazon      |
 | Charlotte Henkle         | [CEHENKLE](https://github.com/CEHENKLE)                 | Amazon      |
+| Craig Perkins            | [cwperks](https://github.com/cwperks)                   | Amazon      |
 | Dan Widdis               | [dbwiddis](https://github.com/dbwiddis)                 | Amazon      |
 | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock)                     | Amazon      |
 | Gao Binlong              | [gaobinlong](https://github.com/gaobinlong)             | Amazon      |

From 1b73e9825a78aa084786f8a256a58ce7d8ecbe1d Mon Sep 17 00:00:00 2001
From: Daniel Widdis <widdis@gmail.com>
Date: Sat, 18 Jan 2025 19:37:25 -0800
Subject: [PATCH 54/61] Update CODEOWNERS (#17053)

Signed-off-by: Daniel Widdis <widdis@gmail.com>
---
 .github/CODEOWNERS | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 18a310862dfbb..38ce0c3a3f927 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -11,27 +11,27 @@
 #   3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file.
 
 # Default ownership for all repo files
-* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19  @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19  @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 
 /modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 /modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 /modules/transport-netty4/ @peternied
 
-/plugins/identity-shiro/ @peternied
+/plugins/identity-shiro/ @peternied @cwperks
 
-/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
-/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 
 /server/src/main/java/org/opensearch/extensions/ @peternied
-/server/src/main/java/org/opensearch/identity/ @peternied
-/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
-/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/main/java/org/opensearch/identity/ @peternied @cwperks 
+/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 /server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied
 /server/src/main/java/org/opensearch/transport/ @peternied
 
-/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
-/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 
 /.github/ @jed326 @peternied
 
-/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @cwperks @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah

From 1b4a817431821050b904184be84c375be5cd4be3 Mon Sep 17 00:00:00 2001
From: Ashish Singh <ssashish@amazon.com>
Date: Mon, 20 Jan 2025 11:11:12 +0530
Subject: [PATCH 55/61] Improve exception handling in S3BlobContainer
 synchronous operations (#17049)

Signed-off-by: Ashish Singh <ssashish@amazon.com>
---
 .../repositories/s3/S3BlobContainer.java      |  23 +++-
 .../s3/S3BlobStoreContainerTests.java         | 110 ++++++++++++++++++
 2 files changed, 129 insertions(+), 4 deletions(-)

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index 8690a5c91680a..d5cf201b171bb 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -99,6 +99,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -373,17 +374,31 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS
     }
 
     @Override
-    public DeleteResult delete() {
+    public DeleteResult delete() throws IOException {
         PlainActionFuture<DeleteResult> future = new PlainActionFuture<>();
         deleteAsync(future);
-        return future.actionGet();
+        return getFutureValue(future);
     }
 
     @Override
-    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) {
+    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
         PlainActionFuture<Void> future = new PlainActionFuture<>();
         deleteBlobsAsyncIgnoringIfNotExists(blobNames, future);
-        future.actionGet();
+        getFutureValue(future);
+    }
+
+    private <T> T getFutureValue(PlainActionFuture<T> future) throws IOException {
+        try {
+            return future.get();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new IllegalStateException("Future got interrupted", e);
+        } catch (ExecutionException e) {
+            if (e.getCause() instanceof IOException) {
+                throw (IOException) e.getCause();
+            }
+            throw new RuntimeException(e.getCause());
+        }
     }
 
     @Override
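The getFutureValue helper added above defines the synchronous error contract for these operations: an interrupt restores the thread's interrupt flag and surfaces as IllegalStateException("Future got interrupted"), while an ExecutionException is unwrapped so that an IOException cause propagates as-is and any other cause is rethrown as a RuntimeException. A self-contained sketch of the same pattern against a plain CompletableFuture (no S3 or OpenSearch types) is shown below.

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    public final class FutureUnwrapExample {

        // Same unwrapping pattern as getFutureValue(), applied to a plain CompletableFuture.
        static <T> T getFutureValue(CompletableFuture<T> future) throws IOException {
            try {
                return future.get();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag for callers further up the stack
                throw new IllegalStateException("Future got interrupted", e);
            } catch (ExecutionException e) {
                if (e.getCause() instanceof IOException) {
                    throw (IOException) e.getCause(); // preserve checked IO failures as-is
                }
                throw new RuntimeException(e.getCause());
            }
        }

        public static void main(String[] args) {
            CompletableFuture<String> failed = new CompletableFuture<>();
            failed.completeExceptionally(new IOException("simulated failure"));
            try {
                getFutureValue(failed);
            } catch (IOException e) {
                System.out.println("unwrapped: " + e.getMessage()); // unwrapped: simulated failure
            }
        }
    }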
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
index 53371cd1529ce..d3725642760dc 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
@@ -1947,6 +1947,116 @@ public void onFailure(Exception e) {
         assertEquals(simulatedFailure, exceptionRef.get().getCause());
     }
 
+    public void testDeleteWithInterruptedException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        // Interrupt the current thread from the mocked list operation so the synchronous wait on the future throws InterruptedException
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        doAnswer(invocation -> {
+            Thread.currentThread().interrupt();
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        IllegalStateException e = expectThrows(IllegalStateException.class, blobContainer::delete);
+        assertEquals("Future got interrupted", e.getMessage());
+        assertTrue(Thread.interrupted()); // Clear interrupted state
+    }
+
+    public void testDeleteWithExecutionException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        RuntimeException simulatedError = new RuntimeException("Simulated error");
+        final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class);
+        doAnswer(invocation -> {
+            Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0);
+            subscriber.onError(simulatedError);
+            return null;
+        }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<ListObjectsV2Response>>any());
+
+        when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        IOException e = expectThrows(IOException.class, blobContainer::delete);
+        assertEquals("Failed to list objects for deletion", e.getMessage());
+        assertEquals(simulatedError, e.getCause());
+    }
+
+    public void testDeleteBlobsIgnoringIfNotExistsWithInterruptedException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.getBulkDeletesSize()).thenReturn(5);
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        // Interrupt the current thread from the mocked deleteObjects call so the synchronous wait on the future throws InterruptedException
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenAnswer(invocation -> {
+            Thread.currentThread().interrupt();
+            return null;
+        });
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        List<String> blobNames = Arrays.asList("test1", "test2");
+
+        IllegalStateException e = expectThrows(IllegalStateException.class, () -> blobContainer.deleteBlobsIgnoringIfNotExists(blobNames));
+        assertEquals("Future got interrupted", e.getMessage());
+        assertTrue(Thread.interrupted()); // Clear interrupted state
+    }
+
+    public void testDeleteBlobsIgnoringIfNotExistsWithExecutionException() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final BlobPath blobPath = new BlobPath();
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.getBulkDeletesSize()).thenReturn(5);
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class);
+        when(blobStore.asyncClientReference()).thenReturn(asyncClientReference);
+        when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null));
+
+        RuntimeException simulatedError = new RuntimeException("Simulated delete error");
+        CompletableFuture<DeleteObjectsResponse> failedFuture = new CompletableFuture<>();
+        failedFuture.completeExceptionally(simulatedError);
+        when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        List<String> blobNames = Arrays.asList("test1", "test2");
+
+        IOException e = expectThrows(IOException.class, () -> blobContainer.deleteBlobsIgnoringIfNotExists(blobNames));
+        assertEquals("Failed to delete blobs " + blobNames, e.getMessage());
+        assertEquals(simulatedError, e.getCause().getCause());
+    }
+
     private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) {
 
         final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize));

From d7309a959d932e65dd9fd96a340509f60c732dd5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 11:52:34 -0500
Subject: [PATCH 56/61] Bump com.squareup.okio:okio from 3.9.1 to 3.10.2 in
 /test/fixtures/hdfs-fixture (#17060)

* Bump com.squareup.okio:okio in /test/fixtures/hdfs-fixture

Bumps [com.squareup.okio:okio](https://github.com/square/okio) from 3.9.1 to 3.10.2.
- [Release notes](https://github.com/square/okio/releases)
- [Changelog](https://github.com/square/okio/blob/master/CHANGELOG.md)
- [Commits](https://github.com/square/okio/compare/3.9.1...3.10.2)

---
updated-dependencies:
- dependency-name: com.squareup.okio:okio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                            | 1 +
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c9d7d9a60a3e5..139ce50608699 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `opentelemetry` from 1.41.0 to 1.46.0 ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
 - Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
 - Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012))
+- Bump `com.squareup.okio:okio` from 3.9.1 to 3.10.2 ([#17060](https://github.com/opensearch-project/OpenSearch/pull/17060))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index fdbd3ed0d3571..49a728586b2fa 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -88,6 +88,6 @@ dependencies {
   runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") {
     exclude group: "com.squareup.okio"
   }
-  runtimeOnly "com.squareup.okio:okio:3.9.1"
+  runtimeOnly "com.squareup.okio:okio:3.10.2"
   runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.7"
 }

From 6e3d710ec9480c42073d9aabb30c404e606176d0 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Mon, 20 Jan 2025 21:50:23 -0500
Subject: [PATCH 57/61] Fix the compiler task status reporting for annotation
 processor (#17063)

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
---
 .../ApiAnnotationProcessorTests.java          | 211 +++++++++++-------
 .../annotation/processor/CompilerSupport.java |  40 ++--
 2 files changed, 159 insertions(+), 92 deletions(-)

diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java
index 716dcc3b9015f..944b29c139160 100644
--- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java
+++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java
@@ -8,8 +8,11 @@
 
 package org.opensearch.common.annotation.processor;
 
+import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.annotation.InternalApi;
 import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 
 import javax.tools.Diagnostic;
 
@@ -20,10 +23,14 @@
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
 
+@SuppressForbidden(reason = "TemporaryFolder does not support Path-based APIs")
 @SuppressWarnings("deprecation")
 public class ApiAnnotationProcessorTests extends OpenSearchTestCase implements CompilerSupport {
+    @Rule
+    public final TemporaryFolder folder = new TemporaryFolder();
+
     public void testPublicApiMethodArgumentNotAnnotated() {
-        final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotated.java", "NotAnnotated.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiMethodArgumentNotAnnotated.java", "NotAnnotated.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -44,7 +51,11 @@ public void testPublicApiMethodArgumentNotAnnotated() {
     }
 
     public void testPublicApiMethodArgumentNotAnnotatedGenerics() {
-        final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotatedGenerics.java", "NotAnnotated.java");
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodArgumentNotAnnotatedGenerics.java",
+            "NotAnnotated.java"
+        );
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -65,7 +76,11 @@ public void testPublicApiMethodArgumentNotAnnotatedGenerics() {
     }
 
     public void testPublicApiMethodThrowsNotAnnotated() {
-        final CompilerResult result = compile("PublicApiMethodThrowsNotAnnotated.java", "PublicApiAnnotated.java");
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodThrowsNotAnnotated.java",
+            "PublicApiAnnotated.java"
+        );
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -86,7 +101,7 @@ public void testPublicApiMethodThrowsNotAnnotated() {
     }
 
     public void testPublicApiMethodArgumentNotAnnotatedPackagePrivate() {
-        final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotatedPackagePrivate.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiMethodArgumentNotAnnotatedPackagePrivate.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -120,7 +135,7 @@ public void testPublicApiMethodArgumentNotAnnotatedPackagePrivate() {
     }
 
     public void testPublicApiMethodArgumentAnnotatedPackagePrivate() {
-        final CompilerResult result = compile("PublicApiMethodArgumentAnnotatedPackagePrivate.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiMethodArgumentAnnotatedPackagePrivate.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -141,7 +156,7 @@ public void testPublicApiMethodArgumentAnnotatedPackagePrivate() {
     }
 
     public void testPublicApiWithInternalApiMethod() {
-        final CompilerResult result = compile("PublicApiWithInternalApiMethod.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiWithInternalApiMethod.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -164,40 +179,48 @@ public void testPublicApiWithInternalApiMethod() {
      * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi}
      */
     public void testPublicApiConstructorArgumentNotAnnotated() {
-        final CompilerResult result = compile("PublicApiConstructorArgumentNotAnnotated.java", "NotAnnotated.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiConstructorArgumentNotAnnotated.java",
+            "NotAnnotated.java"
+        );
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     /**
      * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi}
      */
     public void testPublicApiConstructorArgumentAnnotatedInternalApi() {
-        final CompilerResult result = compile("PublicApiConstructorArgumentAnnotatedInternalApi.java", "InternalApiAnnotated.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiConstructorArgumentAnnotatedInternalApi.java",
+            "InternalApiAnnotated.java"
+        );
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiWithExperimentalApiMethod() {
-        final CompilerResult result = compile("PublicApiWithExperimentalApiMethod.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiWithExperimentalApiMethod.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiMethodReturnNotAnnotated() {
-        final CompilerResult result = compile("PublicApiMethodReturnNotAnnotated.java", "NotAnnotated.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiMethodReturnNotAnnotated.java", "NotAnnotated.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -218,7 +241,11 @@ public void testPublicApiMethodReturnNotAnnotated() {
     }
 
     public void testPublicApiMethodReturnNotAnnotatedGenerics() {
-        final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedGenerics.java", "NotAnnotated.java");
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodReturnNotAnnotatedGenerics.java",
+            "NotAnnotated.java"
+        );
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -239,7 +266,11 @@ public void testPublicApiMethodReturnNotAnnotatedGenerics() {
     }
 
     public void testPublicApiMethodReturnNotAnnotatedArray() {
-        final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedArray.java", "NotAnnotated.java");
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodReturnNotAnnotatedArray.java",
+            "NotAnnotated.java"
+        );
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -260,7 +291,11 @@ public void testPublicApiMethodReturnNotAnnotatedArray() {
     }
 
     public void testPublicApiMethodReturnNotAnnotatedBoundedGenerics() {
-        final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedBoundedGenerics.java", "NotAnnotated.java");
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodReturnNotAnnotatedBoundedGenerics.java",
+            "NotAnnotated.java"
+        );
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -282,6 +317,7 @@ public void testPublicApiMethodReturnNotAnnotatedBoundedGenerics() {
 
     public void testPublicApiMethodReturnNotAnnotatedAnnotation() {
         final CompilerResult result = compile(
+            folder.getRoot().toPath(),
             "PublicApiMethodReturnNotAnnotatedAnnotation.java",
             "PublicApiAnnotated.java",
             "NotAnnotatedAnnotation.java"
@@ -306,57 +342,57 @@ public void testPublicApiMethodReturnNotAnnotatedAnnotation() {
     }
 
     public void testPublicApiMethodReturnNotAnnotatedWildcardGenerics() {
-        final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedWildcardGenerics.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiMethodReturnNotAnnotatedWildcardGenerics.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiWithPackagePrivateMethod() {
-        final CompilerResult result = compile("PublicApiWithPackagePrivateMethod.java", "NotAnnotated.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiWithPackagePrivateMethod.java", "NotAnnotated.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiMethodReturnSelf() {
-        final CompilerResult result = compile("PublicApiMethodReturnSelf.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiMethodReturnSelf.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testExperimentalApiMethodReturnSelf() {
-        final CompilerResult result = compile("ExperimentalApiMethodReturnSelf.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "ExperimentalApiMethodReturnSelf.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testDeprecatedApiMethodReturnSelf() {
-        final CompilerResult result = compile("DeprecatedApiMethodReturnSelf.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "DeprecatedApiMethodReturnSelf.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiPackagePrivate() {
-        final CompilerResult result = compile("PublicApiPackagePrivate.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiPackagePrivate.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -376,7 +412,11 @@ public void testPublicApiPackagePrivate() {
     }
 
     public void testPublicApiMethodGenericsArgumentNotAnnotated() {
-        final CompilerResult result = compile("PublicApiMethodGenericsArgumentNotAnnotated.java", "NotAnnotated.java");
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodGenericsArgumentNotAnnotated.java",
+            "NotAnnotated.java"
+        );
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -397,27 +437,35 @@ public void testPublicApiMethodGenericsArgumentNotAnnotated() {
     }
 
     public void testPublicApiMethodReturnAnnotatedArray() {
-        final CompilerResult result = compile("PublicApiMethodReturnAnnotatedArray.java", "PublicApiAnnotated.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodReturnAnnotatedArray.java",
+            "PublicApiAnnotated.java"
+        );
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiMethodGenericsArgumentAnnotated() {
-        final CompilerResult result = compile("PublicApiMethodGenericsArgumentAnnotated.java", "PublicApiAnnotated.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiMethodGenericsArgumentAnnotated.java",
+            "PublicApiAnnotated.java"
+        );
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiAnnotatedNotOpensearch() {
-        final CompilerResult result = compileWithPackage("org.acme", "PublicApiAnnotated.java");
+        final CompilerResult result = compileWithPackage(folder.getRoot().toPath(), "org.acme", "PublicApiAnnotated.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -438,6 +486,7 @@ public void testPublicApiAnnotatedNotOpensearch() {
 
     public void testPublicApiMethodReturnAnnotatedGenerics() {
         final CompilerResult result = compile(
+            folder.getRoot().toPath(),
             "PublicApiMethodReturnAnnotatedGenerics.java",
             "PublicApiAnnotated.java",
             "NotAnnotatedAnnotation.java"
@@ -465,30 +514,34 @@ public void testPublicApiMethodReturnAnnotatedGenerics() {
      * The type could expose protected inner types which are still considered to be a public API when used
      */
     public void testPublicApiWithProtectedInterface() {
-        final CompilerResult result = compile("PublicApiWithProtectedInterface.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiWithProtectedInterface.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     /**
      * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi}
      */
     public void testPublicApiConstructorAnnotatedInternalApi() {
-        final CompilerResult result = compile("PublicApiConstructorAnnotatedInternalApi.java", "NotAnnotated.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(
+            folder.getRoot().toPath(),
+            "PublicApiConstructorAnnotatedInternalApi.java",
+            "NotAnnotated.java"
+        );
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
     public void testPublicApiUnparseableVersion() {
-        final CompilerResult result = compile("PublicApiAnnotatedUnparseable.java");
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiAnnotatedUnparseable.java");
         assertThat(result, instanceOf(Failure.class));
 
         final Failure failure = (Failure) result;
@@ -508,13 +561,13 @@ public void testPublicApiUnparseableVersion() {
     }
 
     public void testPublicApiWithDeprecatedApiMethod() {
-        final CompilerResult result = compile("PublicApiWithDeprecatedApiMethod.java");
-        assertThat(result, instanceOf(Failure.class));
+        final CompilerResult result = compile(folder.getRoot().toPath(), "PublicApiWithDeprecatedApiMethod.java");
+        assertThat(result, instanceOf(Success.class));
 
-        final Failure failure = (Failure) result;
-        assertThat(failure.diagnotics(), hasSize(2));
+        final Success success = (Success) result;
+        assertThat(success.diagnotics(), hasSize(2));
 
-        assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+        assertThat(success.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
     }
 
 }
diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java
index c8fdb3333a714..e6bde87ec9348 100644
--- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java
+++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java
@@ -29,6 +29,7 @@
 import java.net.URI;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.Arrays;
@@ -39,12 +40,12 @@
 import java.util.stream.Stream;
 
 interface CompilerSupport {
-    default CompilerResult compile(String name, String... names) {
-        return compileWithPackage(ApiAnnotationProcessorTests.class.getPackageName(), name, names);
+    default CompilerResult compile(Path outputDirectory, String name, String... names) {
+        return compileWithPackage(outputDirectory, ApiAnnotationProcessorTests.class.getPackageName(), name, names);
     }
 
     @SuppressWarnings("removal")
-    default CompilerResult compileWithPackage(String pck, String name, String... names) {
+    default CompilerResult compileWithPackage(Path outputDirectory, String pck, String name, String... names) {
         final JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
         final DiagnosticCollector<JavaFileObject> collector = new DiagnosticCollector<>();
 
@@ -54,11 +55,18 @@ default CompilerResult compileWithPackage(String pck, String name, String... nam
                 .map(f -> asSource(pck, f))
                 .collect(Collectors.toList());
 
-            final CompilationTask task = compiler.getTask(out, fileManager, collector, null, null, files);
+            final CompilationTask task = compiler.getTask(
+                out,
+                fileManager,
+                collector,
+                List.of("-d", outputDirectory.toString()),
+                null,
+                files
+            );
             task.setProcessors(Collections.singleton(new ApiAnnotationProcessor()));
 
             if (AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> task.call())) {
-                return new Success();
+                return new Success(collector.getDiagnostics());
             } else {
                 return new Failure(collector.getDiagnostics());
             }
@@ -81,16 +89,10 @@ public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOExcept
         };
     }
 
-    class CompilerResult {}
-
-    class Success extends CompilerResult {
-
-    }
-
-    class Failure extends CompilerResult {
+    class CompilerResult {
         private final List<Diagnostic<? extends JavaFileObject>> diagnotics;
 
-        Failure(List<Diagnostic<? extends JavaFileObject>> diagnotics) {
+        CompilerResult(List<Diagnostic<? extends JavaFileObject>> diagnotics) {
             this.diagnotics = diagnotics;
         }
 
@@ -99,6 +101,18 @@ List<Diagnostic<? extends JavaFileObject>> diagnotics() {
         }
     }
 
+    class Success extends CompilerResult {
+        Success(List<Diagnostic<? extends JavaFileObject>> diagnotics) {
+            super(diagnotics);
+        }
+    }
+
+    class Failure extends CompilerResult {
+        Failure(List<Diagnostic<? extends JavaFileObject>> diagnotics) {
+            super(diagnotics);
+        }
+    }
+
     class HasDiagnostic extends TypeSafeMatcher<Diagnostic<? extends JavaFileObject>> {
         private final Diagnostic.Kind kind;
         private final Matcher<String> matcher;

From 699a88085a569078f4ed501141a045f589afdf8d Mon Sep 17 00:00:00 2001
From: Meet <105229321+meet-v25@users.noreply.github.com>
Date: Tue, 21 Jan 2025 15:38:42 +0530
Subject: [PATCH 58/61] [BugFix] Hide stacktrace in response on translog transfer
 upload failure (#16891)

---------

Signed-off-by: meetvm <meetvm@amazon.com>
Co-authored-by: meetvm <meetvm@amazon.com>
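
A minimal, self-contained sketch of the sanitizing pattern applied here (the listener and class names are invented for illustration, not taken from the patch): the exception handed to the upload-failure listener is rebuilt from the original message only, so suppressed exceptions gathered per file never reach the response.

import java.io.IOException;

public class SanitizeFailureSketch {
    interface UploadListener {
        void onUploadFailed(Exception e);
    }

    static void reportFailure(Exception original, UploadListener listener) {
        // The full exception, including suppressed details, would normally go to the server log here.
        Exception withoutSuppressed = new IOException(original.getMessage());
        listener.onUploadFailed(withoutSuppressed);
    }

    public static void main(String[] args) {
        Exception original = new RuntimeException("upload failed during transfer");
        original.addSuppressed(new RuntimeException("per-file failure detail"));
        reportFailure(original, e -> {
            System.out.println(e.getMessage());            // message is preserved
            System.out.println(e.getSuppressed().length);  // 0: suppressed detail is dropped
        });
    }
}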
---
 .../transfer/TranslogTransferManager.java     |  3 +-
 .../TranslogTransferManagerTests.java         | 74 +++++++++++++++++++
 2 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index 924669d0e46a9..1e621d6cb7688 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -206,7 +206,8 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans
         } catch (Exception ex) {
             logger.error(() -> new ParameterizedMessage("Transfer failed for snapshot {}", transferSnapshot), ex);
             captureStatsOnUploadFailure();
-            translogTransferListener.onUploadFailed(transferSnapshot, ex);
+            Exception exWithoutSuppressed = new TranslogUploadFailedException(ex.getMessage());
+            translogTransferListener.onUploadFailed(transferSnapshot, exWithoutSuppressed);
             return false;
         }
     }
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
index ed0d6b7d50706..77dfd5b27581d 100644
--- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
@@ -206,6 +206,80 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
         assertEquals(4, fileTransferTracker.allUploaded().size());
     }
 
+    public void testTransferSnapshotOnFileTransferUploadFail() throws Exception {
+        AtomicInteger fileTransferSucceeded = new AtomicInteger();
+        AtomicInteger fileTransferFailed = new AtomicInteger();
+        AtomicInteger translogTransferSucceeded = new AtomicInteger();
+        AtomicInteger translogTransferFailed = new AtomicInteger();
+
+        doAnswer(invocationOnMock -> {
+            ActionListener<TransferFileSnapshot> listener = (ActionListener<TransferFileSnapshot>) invocationOnMock.getArguments()[2];
+            Set<TransferFileSnapshot> transferFileSnapshots = (Set<TransferFileSnapshot>) invocationOnMock.getArguments()[0];
+
+            TransferFileSnapshot actualFileSnapshot = transferFileSnapshots.iterator().next();
+            FileTransferException testException = new FileTransferException(
+                actualFileSnapshot,
+                new RuntimeException("FileTransferUploadNeedsToFail-Exception")
+            );
+
+            listener.onFailure(testException);
+            transferFileSnapshots.stream().skip(1).forEach(listener::onResponse);
+            return null;
+        }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class));
+
+        FileTransferTracker fileTransferTracker = new FileTransferTracker(
+            new ShardId("index", "indexUUid", 0),
+            remoteTranslogTransferTracker
+        ) {
+            @Override
+            public void onSuccess(TransferFileSnapshot fileSnapshot) {
+                fileTransferSucceeded.incrementAndGet();
+                super.onSuccess(fileSnapshot);
+            }
+
+            @Override
+            public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) {
+                fileTransferFailed.incrementAndGet();
+                super.onFailure(fileSnapshot, e);
+            }
+        };
+
+        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
+            shardId,
+            transferService,
+            remoteBaseTransferPath.add(TRANSLOG.getName()),
+            remoteBaseTransferPath.add(METADATA.getName()),
+            fileTransferTracker,
+            remoteTranslogTransferTracker,
+            DefaultRemoteStoreSettings.INSTANCE,
+            isTranslogMetadataEnabled
+        );
+
+        SetOnce<Exception> exception = new SetOnce<>();
+        assertFalse(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() {
+            @Override
+            public void onUploadComplete(TransferSnapshot transferSnapshot) {
+                translogTransferSucceeded.incrementAndGet();
+            }
+
+            @Override
+            public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
+                translogTransferFailed.incrementAndGet();
+                exception.set(ex);
+            }
+        }));
+
+        assertNotNull(exception.get());
+        assertTrue(exception.get() instanceof TranslogUploadFailedException);
+        assertEquals("Failed to upload 1 files during transfer", exception.get().getMessage());
+        assertEquals(0, exception.get().getSuppressed().length);
+        assertEquals(3, fileTransferSucceeded.get());
+        assertEquals(1, fileTransferFailed.get());
+        assertEquals(0, translogTransferSucceeded.get());
+        assertEquals(1, translogTransferFailed.get());
+        assertEquals(3, fileTransferTracker.allUploaded().size());
+    }
+
     public void testTransferSnapshotOnUploadTimeout() throws Exception {
         doAnswer(invocationOnMock -> {
             Set<TransferFileSnapshot> transferFileSnapshots = invocationOnMock.getArgument(0);

From 827aa6322ac5f6a618e8d2aafe21ce567a338611 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 21 Jan 2025 07:36:49 -0500
Subject: [PATCH 59/61] Bump com.diffplug.spotless from 6.25.0 to 7.0.2
 (#17058)

* Bump com.diffplug.spotless from 6.25.0 to 7.0.2

Bumps com.diffplug.spotless from 6.25.0 to 7.0.2.

---
updated-dependencies:
- dependency-name: com.diffplug.spotless
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* Spotless formatting

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Craig Perkins <cwperx@amazon.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: opensearch-trigger-bot[bot] <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Co-authored-by: Craig Perkins <cwperx@amazon.com>
---
 CHANGELOG.md                                  |  1 +
 build.gradle                                  |  2 +-
 .../RemoteRoutingTableServiceTests.java       | 11 +++++-----
 .../builder/StarTreesBuilderTests.java        | 22 ++++++++++++++++---
 .../RemoteSegmentStoreDirectoryTests.java     | 18 ++++++++++-----
 ...toreDirectoryWithPinnedTimestampTests.java |  3 ++-
 .../RemoteStoreReplicationSourceTests.java    |  8 +++++--
 7 files changed, 46 insertions(+), 19 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 139ce50608699..fd64ad56a62b9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
 - Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012))
 - Bump `com.squareup.okio:okio` from 3.9.1 to 3.10.2 ([#17060](https://github.com/opensearch-project/OpenSearch/pull/17060))
+- Bump `com.diffplug.spotless` from 6.25.0 to 7.0.2 ([#17058](https://github.com/opensearch-project/OpenSearch/pull/17058))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/build.gradle b/build.gradle
index 679f7b9299248..fde086b3bd79e 100644
--- a/build.gradle
+++ b/build.gradle
@@ -54,7 +54,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
   id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "6.25.0" apply false
+  id "com.diffplug.spotless" version "7.0.2" apply false
   id "test-report-aggregation"
   id 'jacoco-report-aggregation'
 }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java
index 63501f878d55d..fa6bcc3372fb7 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java
@@ -800,9 +800,7 @@ public void testDeleteStaleIndexRoutingPathsThrowsIOException() throws IOExcepti
         doThrow(new IOException("test exception")).when(blobContainer).deleteBlobsIgnoringIfNotExists(Mockito.anyList());
 
         remoteRoutingTableService.doStart();
-        IOException thrown = assertThrows(IOException.class, () -> {
-            remoteRoutingTableService.deleteStaleIndexRoutingPaths(stalePaths);
-        });
+        IOException thrown = assertThrows(IOException.class, () -> { remoteRoutingTableService.deleteStaleIndexRoutingPaths(stalePaths); });
         assertEquals("test exception", thrown.getMessage());
         verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths);
     }
@@ -823,9 +821,10 @@ public void testDeleteStaleIndexRoutingDiffPathsThrowsIOException() throws IOExc
         doThrow(new IOException("test exception")).when(blobContainer).deleteBlobsIgnoringIfNotExists(Mockito.anyList());
 
         remoteRoutingTableService.doStart();
-        IOException thrown = assertThrows(IOException.class, () -> {
-            remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(stalePaths);
-        });
+        IOException thrown = assertThrows(
+            IOException.class,
+            () -> { remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(stalePaths); }
+        );
         assertEquals("test exception", thrown.getMessage());
         verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths);
     }
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java
index 4ab21dbce059f..337f948d6da97 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java
@@ -104,16 +104,32 @@ public void test_buildWithNoStarTreeFields() throws IOException {
     public void test_getStarTreeBuilder() throws IOException {
         when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType));
         StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService, new AtomicInteger());
-        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(metaOut, dataOut, starTreeField, segmentWriteState, mapperService);
+        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(
+            metaOut,
+            dataOut,
+            starTreeField,
+            segmentWriteState,
+            mapperService
+        );
         assertTrue(starTreeBuilder instanceof OnHeapStarTreeBuilder);
     }
 
     public void test_getStarTreeBuilder_illegalArgument() throws IOException {
         when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType));
-        StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(1, new HashSet<>(), StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP);
+        StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(
+            1,
+            new HashSet<>(),
+            StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP
+        );
         StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration);
         StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService, new AtomicInteger());
-        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(metaOut, dataOut, starTreeField, segmentWriteState, mapperService);
+        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(
+            metaOut,
+            dataOut,
+            starTreeField,
+            segmentWriteState,
+            mapperService
+        );
         assertTrue(starTreeBuilder instanceof OffHeapStarTreeBuilder);
         starTreeBuilder.close();
     }
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index df3df81361a12..cd2f4cc1eb079 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -135,9 +135,12 @@ public void testGetPrimaryTermGenerationUuid() {
     }
 
     public void testInitException() throws IOException {
-        when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenThrow(
-            new IOException("Error")
-        );
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+                METADATA_FILES_TO_FETCH
+            )
+        ).thenThrow(new IOException("Error"));
 
         assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.init());
     }
@@ -155,9 +158,12 @@ public void testInitNoMetadataFile() throws IOException {
     }
 
     public void testInitMultipleMetadataFile() throws IOException {
-        when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenReturn(
-            List.of(metadataFilename, metadataFilenameDup)
-        );
+        when(
+            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
+                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+                METADATA_FILES_TO_FETCH
+            )
+        ).thenReturn(List.of(metadataFilename, metadataFilenameDup));
         assertThrows(IllegalStateException.class, () -> remoteSegmentStoreDirectory.init());
     }
 
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
index e71023125d4cd..2531462d21d40 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
@@ -187,7 +187,8 @@ public void testDeleteStaleCommitsPinnedTimestampMdFile() throws Exception {
             )
         ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3));
 
-        long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2) + 10;
+        long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2)
+            + 10;
         String blobName = "snapshot1__" + pinnedTimestampMatchingMetadataFilename2;
         when(blobContainer.listBlobs()).thenReturn(Map.of(blobName, new PlainBlobMetadata(blobName, 100)));
 
diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java
index 287962b158c79..b41c8718ec23d 100644
--- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java
@@ -166,8 +166,12 @@ private void buildIndexShardBehavior(IndexShard mockShard, IndexShard indexShard
         when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot());
         Store remoteStore = mock(Store.class);
         when(mockShard.remoteStore()).thenReturn(remoteStore);
-        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()).getDelegate();
-        FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory(new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory));
+        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+            (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate())
+                .getDelegate();
+        FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory(
+            new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory)
+        );
         when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory);
     }
 }

From f5c89c16c79871007be2c13f604314bd362a2bf4 Mon Sep 17 00:00:00 2001
From: Andriy Redko <andriy.redko@aiven.io>
Date: Tue, 21 Jan 2025 09:50:31 -0500
Subject: [PATCH 60/61] Revert "Bump com.diffplug.spotless from 6.25.0 to 7.0.2
 (#17058)" (#17074)

This reverts commit 827aa6322ac5f6a618e8d2aafe21ce567a338611.

Signed-off-by: Andriy Redko <andriy.redko@aiven.io>
---
 CHANGELOG.md                                  |  1 -
 build.gradle                                  |  2 +-
 .../RemoteRoutingTableServiceTests.java       | 11 +++++-----
 .../builder/StarTreesBuilderTests.java        | 22 +++----------------
 .../RemoteSegmentStoreDirectoryTests.java     | 18 +++++----------
 ...toreDirectoryWithPinnedTimestampTests.java |  3 +--
 .../RemoteStoreReplicationSourceTests.java    |  8 ++-----
 7 files changed, 19 insertions(+), 46 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fd64ad56a62b9..139ce50608699 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,7 +66,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700))
 - Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012))
 - Bump `com.squareup.okio:okio` from 3.9.1 to 3.10.2 ([#17060](https://github.com/opensearch-project/OpenSearch/pull/17060))
-- Bump `com.diffplug.spotless` from 6.25.0 to 7.0.2 ([#17058](https://github.com/opensearch-project/OpenSearch/pull/17058))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/build.gradle b/build.gradle
index fde086b3bd79e..679f7b9299248 100644
--- a/build.gradle
+++ b/build.gradle
@@ -54,7 +54,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
   id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "7.0.2" apply false
+  id "com.diffplug.spotless" version "6.25.0" apply false
   id "test-report-aggregation"
   id 'jacoco-report-aggregation'
 }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java
index fa6bcc3372fb7..63501f878d55d 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java
@@ -800,7 +800,9 @@ public void testDeleteStaleIndexRoutingPathsThrowsIOException() throws IOExcepti
         doThrow(new IOException("test exception")).when(blobContainer).deleteBlobsIgnoringIfNotExists(Mockito.anyList());
 
         remoteRoutingTableService.doStart();
-        IOException thrown = assertThrows(IOException.class, () -> { remoteRoutingTableService.deleteStaleIndexRoutingPaths(stalePaths); });
+        IOException thrown = assertThrows(IOException.class, () -> {
+            remoteRoutingTableService.deleteStaleIndexRoutingPaths(stalePaths);
+        });
         assertEquals("test exception", thrown.getMessage());
         verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths);
     }
@@ -821,10 +823,9 @@ public void testDeleteStaleIndexRoutingDiffPathsThrowsIOException() throws IOExc
         doThrow(new IOException("test exception")).when(blobContainer).deleteBlobsIgnoringIfNotExists(Mockito.anyList());
 
         remoteRoutingTableService.doStart();
-        IOException thrown = assertThrows(
-            IOException.class,
-            () -> { remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(stalePaths); }
-        );
+        IOException thrown = assertThrows(IOException.class, () -> {
+            remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(stalePaths);
+        });
         assertEquals("test exception", thrown.getMessage());
         verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths);
     }
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java
index 337f948d6da97..4ab21dbce059f 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java
@@ -104,32 +104,16 @@ public void test_buildWithNoStarTreeFields() throws IOException {
     public void test_getStarTreeBuilder() throws IOException {
         when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType));
         StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService, new AtomicInteger());
-        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(
-            metaOut,
-            dataOut,
-            starTreeField,
-            segmentWriteState,
-            mapperService
-        );
+        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(metaOut, dataOut, starTreeField, segmentWriteState, mapperService);
         assertTrue(starTreeBuilder instanceof OnHeapStarTreeBuilder);
     }
 
     public void test_getStarTreeBuilder_illegalArgument() throws IOException {
         when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType));
-        StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(
-            1,
-            new HashSet<>(),
-            StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP
-        );
+        StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(1, new HashSet<>(), StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP);
         StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration);
         StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService, new AtomicInteger());
-        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(
-            metaOut,
-            dataOut,
-            starTreeField,
-            segmentWriteState,
-            mapperService
-        );
+        StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(metaOut, dataOut, starTreeField, segmentWriteState, mapperService);
         assertTrue(starTreeBuilder instanceof OffHeapStarTreeBuilder);
         starTreeBuilder.close();
     }
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index cd2f4cc1eb079..df3df81361a12 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -135,12 +135,9 @@ public void testGetPrimaryTermGenerationUuid() {
     }
 
     public void testInitException() throws IOException {
-        when(
-            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
-                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
-                METADATA_FILES_TO_FETCH
-            )
-        ).thenThrow(new IOException("Error"));
+        when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenThrow(
+            new IOException("Error")
+        );
 
         assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.init());
     }
@@ -158,12 +155,9 @@ public void testInitNoMetadataFile() throws IOException {
     }
 
     public void testInitMultipleMetadataFile() throws IOException {
-        when(
-            remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(
-                RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
-                METADATA_FILES_TO_FETCH
-            )
-        ).thenReturn(List.of(metadataFilename, metadataFilenameDup));
+        when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenReturn(
+            List.of(metadataFilename, metadataFilenameDup)
+        );
         assertThrows(IllegalStateException.class, () -> remoteSegmentStoreDirectory.init());
     }
 
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
index 2531462d21d40..e71023125d4cd 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java
@@ -187,8 +187,7 @@ public void testDeleteStaleCommitsPinnedTimestampMdFile() throws Exception {
             )
         ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3));
 
-        long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2)
-            + 10;
+        long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2) + 10;
         String blobName = "snapshot1__" + pinnedTimestampMatchingMetadataFilename2;
         when(blobContainer.listBlobs()).thenReturn(Map.of(blobName, new PlainBlobMetadata(blobName, 100)));
 
diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java
index b41c8718ec23d..287962b158c79 100644
--- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java
@@ -166,12 +166,8 @@ private void buildIndexShardBehavior(IndexShard mockShard, IndexShard indexShard
         when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot());
         Store remoteStore = mock(Store.class);
         when(mockShard.remoteStore()).thenReturn(remoteStore);
-        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
-            (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate())
-                .getDelegate();
-        FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory(
-            new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory)
-        );
+        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()).getDelegate();
+        FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory(new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory));
         when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory);
     }
 }

From 6b1861aa93afe382ff4744081f6af98beff506d5 Mon Sep 17 00:00:00 2001
From: 조혜온 <68319395+hye-on@users.noreply.github.com>
Date: Tue, 21 Jan 2025 23:59:10 +0900
Subject: [PATCH 61/61] Fix interchanged formats of
 total_indexing_buffer_in_bytes and total_indexing_buffer (#17070)

* Fix swapped formats of total_indexing_buffer_in_bytes and total_indexing_buffer in the nodes API

Signed-off-by: hye-on <ain0103@naver.com>

* Move changelog entry to CHANGELOG-3.0 and update PR reference to #17070

Signed-off-by: hye-on <ain0103@naver.com>

---------

Signed-off-by: hye-on <ain0103@naver.com>
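
For reference, a standalone sketch of the field-ordering contract the fix relies on (this only emulates, rather than calls, the real XContentBuilder.humanReadableField API): the first argument names the raw byte-count field and the second the human-readable field, so passing them in the opposite order swaps the two formats in the nodes API output.

import java.util.LinkedHashMap;
import java.util.Map;

public class HumanReadableFieldSketch {
    // Simplified stand-in for humanReadableField(rawFieldName, readableFieldName, value):
    // the raw name always gets the byte count; the readable name gets a formatted value
    // only when human output is requested.
    static Map<String, Object> humanReadableField(String rawFieldName, String readableFieldName,
                                                  long bytes, boolean human) {
        Map<String, Object> out = new LinkedHashMap<>();
        if (human) {
            out.put(readableFieldName, (bytes / (1024 * 1024)) + "mb"); // simplified rendering
        }
        out.put(rawFieldName, bytes);
        return out;
    }

    public static void main(String[] args) {
        long totalIndexingBuffer = 53687091L; // purely illustrative value
        // Correct order after the fix: raw field name first, human-readable field name second.
        System.out.println(humanReadableField("total_indexing_buffer_in_bytes", "total_indexing_buffer",
            totalIndexingBuffer, true));
    }
}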
---
 CHANGELOG-3.0.md                              |  2 ++
 .../50_nodes_total_indexing_buffer_format.yml | 21 +++++++++++++++++++
 .../cluster/node/info/NodesInfoResponse.java  |  2 +-
 3 files changed, 24 insertions(+), 1 deletion(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/50_nodes_total_indexing_buffer_format.yml

diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index fddead96aaf45..8d7ee3c860318 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -50,6 +50,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
 - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
 - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
+- Fix swapped field formats in nodes API where `total_indexing_buffer_in_bytes` and `total_indexing_buffer` values were reversed ([#17070](https://github.com/opensearch-project/OpenSearch/pull/17070))
+
 
 ### Security
 
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/50_nodes_total_indexing_buffer_format.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/50_nodes_total_indexing_buffer_format.yml
new file mode 100644
index 0000000000000..0f855311fe61e
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/50_nodes_total_indexing_buffer_format.yml
@@ -0,0 +1,21 @@
+---
+"Test total indexing buffer fields should have correct formats":
+  - skip:
+      version: " - 2.99.99"
+      reason: "this change is added in 3.0.0"
+      features: [arbitrary_key]
+
+  - do:
+      nodes.info: {}
+  - set:
+      nodes._arbitrary_key_: node_id
+
+  - do:
+      nodes.info:
+        human: true
+        filter_path: "nodes.*.total_indexing_buffer*"
+
+  - gte: { nodes.$node_id.total_indexing_buffer_in_bytes: 0 }
+
+  - match:
+      nodes.$node_id.total_indexing_buffer: /^\d+(\.\d+)?(b|kb|mb|gb|tb|pb)$/
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java
index 7ddd70185e8ad..e2ed5cdf4d06c 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java
@@ -98,7 +98,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             builder.field("build_type", nodeInfo.getBuild().type().displayName());
             builder.field("build_hash", nodeInfo.getBuild().hash());
             if (nodeInfo.getTotalIndexingBuffer() != null) {
-                builder.humanReadableField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer());
+                builder.humanReadableField("total_indexing_buffer_in_bytes", "total_indexing_buffer", nodeInfo.getTotalIndexingBuffer());
             }
 
             builder.startArray("roles");