From b1bc4d96d69e5f1278dfe4aea7e532e1e049c794 Mon Sep 17 00:00:00 2001 From: Vijayan Balasubramanian Date: Fri, 7 Oct 2022 05:27:54 -0700 Subject: [PATCH 01/39] Update GeoGrid base class access modifier to support extensibility (#4572) * Update access modifier to support extensibility Change the access modifier from default to protected. This helps build new geo-based aggregations outside OpenSearch by keeping the GeoGrid classes as base classes. Signed-off-by: Vijayan Balasubramanian * Updated CHANGELOG Added PR details to CHANGELOG.md Signed-off-by: Vijayan Balasubramanian * Rename InternalGeoGridBucket to BaseGeoGridBucket Update class names, references and comments. Signed-off-by: Vijayan Balasubramanian * Rename InternalGeoGrid to BaseGeoGrid Signed-off-by: Vijayan Balasubramanian * Make GridBucket classes package-private Signed-off-by: Vijayan Balasubramanian * Remove Internal prefix from Geo Grid classes Signed-off-by: Vijayan Balasubramanian * Update constructor and class access modifier Signed-off-by: Vijayan Balasubramanian * Update access modifier based on usage Made classes package-private if they are not used outside the package. Signed-off-by: Vijayan Balasubramanian Signed-off-by: Vijayan Balasubramanian --- CHANGELOG.md | 2 + .../org/opensearch/geo/GeoModulePlugin.java | 11 ++-- ...{InternalGeoGrid.java => BaseGeoGrid.java} | 50 +++++++++---------- ...GridBucket.java => BaseGeoGridBucket.java} | 16 +++--- .../bucket/geogrid/BucketPriorityQueue.java | 4 +- .../bucket/geogrid/CellIdSource.java | 2 +- .../aggregations/bucket/geogrid/GeoGrid.java | 4 +- .../geogrid/GeoGridAggregationBuilder.java | 4 +- .../bucket/geogrid/GeoGridAggregator.java | 22 ++++---- ...ernalGeoHashGrid.java => GeoHashGrid.java} | 22 ++++---- .../GeoHashGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoHashGridAggregator.java | 15 +++--- .../geogrid/GeoHashGridAggregatorFactory.java | 4 +- ...ernalGeoTileGrid.java => GeoTileGrid.java} | 22 ++++---- .../GeoTileGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoTileGridAggregator.java | 15 +++--- .../geogrid/GeoTileGridAggregatorFactory.java | 4 +- .../geogrid/InternalGeoHashGridBucket.java | 4 +- .../geogrid/InternalGeoTileGridBucket.java | 4 +- .../bucket/geogrid/ParsedGeoGrid.java | 4 +- .../bucket/geogrid/ParsedGeoGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoHashGrid.java | 2 +- .../geogrid/ParsedGeoHashGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoTileGrid.java | 2 +- ...idAggregationCompositeAggregatorTests.java | 3 +- .../geogrid/GeoGridAggregatorTestCase.java | 12 ++--- .../bucket/geogrid/GeoGridTestCase.java | 16 +++--- .../bucket/geogrid/GeoHashGridTests.java | 11 ++-- .../bucket/geogrid/GeoTileGridTests.java | 11 ++-- .../geo/tests/common/AggregationBuilders.java | 8 +-- .../common/AggregationInspectionHelper.java | 4 +- 31 files changed, 139 insertions(+), 147 deletions(-) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoGrid.java => BaseGeoGrid.java} (72%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoGridBucket.java => BaseGeoGridBucket.java} (87%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoHashGrid.java => GeoHashGrid.java} (70%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoTileGrid.java => GeoTileGrid.java} (70%) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8bfaf5abfed3..018475c7e9ae6 100644 --- a/CHANGELOG.md
+++ b/CHANGELOG.md @@ -29,6 +29,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 1.3.6 ([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) +- Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 77abba7f54677..8ca1d2a0c214f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -35,9 +35,8 @@ import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsGeoShapeAggregator; @@ -78,18 +77,18 @@ public List getAggregations() { GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); final AggregationSpec geoTileGrid = new AggregationSpec( GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); return List.of(geoBounds, geoHashGrid, geoTileGrid); } /** - * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * Registering the geotile grid in the {@link CompositeAggregation}. 
* * @return a {@link List} of {@link CompositeAggregationSpec} */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java similarity index 72% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java index 9dbed7b27307a..b58c19a7186e6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java @@ -54,30 +54,30 @@ * All geo-grid hash-encoding in a grid are of the same precision and held internally as a single long * for efficiency's sake. * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGrid extends InternalMultiBucketAggregation< - InternalGeoGrid, - InternalGeoGridBucket> implements GeoGrid { +public abstract class BaseGeoGrid extends InternalMultiBucketAggregation + implements + GeoGrid { protected final int requiredSize; - protected final List buckets; + protected final List buckets; - InternalGeoGrid(String name, int requiredSize, List buckets, Map metadata) { + protected BaseGeoGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, metadata); this.requiredSize = requiredSize; this.buckets = buckets; } - abstract Writeable.Reader getBucketReader(); + protected abstract Writeable.Reader getBucketReader(); /** * Read from a stream. */ - public InternalGeoGrid(StreamInput in) throws IOException { + public BaseGeoGrid(StreamInput in) throws IOException { super(in); requiredSize = readSize(in); - buckets = (List) in.readList(getBucketReader()); + buckets = (List) in.readList(getBucketReader()); } @Override @@ -86,24 +86,24 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeList(buckets); } - abstract InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata); + protected abstract BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata); @Override - public List getBuckets() { + public List getBuckets() { return unmodifiableList(buckets); } @Override - public InternalGeoGrid reduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; + public BaseGeoGrid reduce(List aggregations, ReduceContext reduceContext) { + LongObjectPagedHashMap> buckets = null; for (InternalAggregation aggregation : aggregations) { - InternalGeoGrid grid = (InternalGeoGrid) aggregation; + BaseGeoGrid grid = (BaseGeoGrid) aggregation; if (buckets == null) { buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); } for (Object obj : grid.buckets) { - InternalGeoGridBucket bucket = (InternalGeoGridBucket) obj; - List existingBuckets = buckets.get(bucket.hashAsLong()); + BaseGeoGridBucket bucket = (BaseGeoGridBucket) obj; + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new ArrayList<>(aggregations.size()); buckets.put(bucket.hashAsLong(), existingBuckets); @@ -113,13 +113,13 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; ordered.insertWithOverflow(reduceBucket(sameCellBuckets, reduceContext)); } buckets.close(); - InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + BaseGeoGridBucket[] list = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); } @@ -128,11 +128,11 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } @Override - protected InternalGeoGridBucket reduceBucket(List buckets, ReduceContext context) { + protected BaseGeoGridBucket reduceBucket(List buckets, ReduceContext context) { assert buckets.size() > 0; List aggregationsList = new ArrayList<>(buckets.size()); long docCount = 0; - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { docCount += bucket.docCount; aggregationsList.add(bucket.aggregations); } @@ -140,12 +140,12 @@ protected InternalGeoGridBucket reduceBucket(List buckets return createBucket(buckets.get(0).hashAsLong, docCount, aggs); } - abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); + protected abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { bucket.toXContent(builder, params); } builder.endArray(); @@ -168,7 +168,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; - InternalGeoGrid other = (InternalGeoGrid) obj; + BaseGeoGrid other = (BaseGeoGrid) obj; return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(buckets, other.buckets); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java similarity index 87% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java index 93fcdbd098400..f362d2b3d33d6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java @@ -45,12 +45,12 @@ /** * Base implementation of geogrid aggs * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket +public abstract class BaseGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, - Comparable { + Comparable { protected long hashAsLong; protected long docCount; @@ -58,7 +58,7 @@ public abstract class InternalGeoGridBucket ext long bucketOrd; - public InternalGeoGridBucket(long hashAsLong, long docCount, 
InternalAggregations aggregations) { + public BaseGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; this.hashAsLong = hashAsLong; @@ -67,7 +67,7 @@ public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregation /** * Read from a stream. */ - public InternalGeoGridBucket(StreamInput in) throws IOException { + public BaseGeoGridBucket(StreamInput in) throws IOException { hashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); @@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - long hashAsLong() { + public long hashAsLong() { return hashAsLong; } @@ -95,7 +95,7 @@ public Aggregations getAggregations() { } @Override - public int compareTo(InternalGeoGridBucket other) { + public int compareTo(BaseGeoGridBucket other) { if (this.hashAsLong > other.hashAsLong) { return 1; } @@ -119,7 +119,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; + BaseGeoGridBucket bucket = (BaseGeoGridBucket) o; return hashAsLong == bucket.hashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index 70d0552b3e80b..83fcdf4f66424 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -38,14 +38,14 @@ * * @opensearch.internal */ -class BucketPriorityQueue extends PriorityQueue { +class BucketPriorityQueue extends PriorityQueue { BucketPriorityQueue(int size) { super(size); } @Override - protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { + protected boolean lessThan(BaseGeoGridBucket o1, BaseGeoGridBucket o2) { int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); if (cmp == 0) { cmp = o2.compareTo(o1); diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index d40029e9a762d..89ce288770185 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -43,7 +43,7 @@ * Wrapper class to help convert {@link MultiGeoPointValues} * to numeric long values for bucketing. 
* - * @opensearch.internal + * @opensearch.api */ public class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index 4ae888640efc8..b2fe6e33ef95c 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -39,13 +39,13 @@ * A geo-grid aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific * precision. * - * @opensearch.internal + * @opensearch.api */ public interface GeoGrid extends MultiBucketsAggregation { /** * A bucket that is associated with a geo-grid cell. The key of the bucket is - * the {@link InternalGeoGridBucket#getKeyAsString()} of the cell + * the {@link BaseGeoGridBucket#getKeyAsString()} of the cell */ interface Bucket extends MultiBucketsAggregation.Bucket {} diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 4a904b3aa2b16..0ca2a28844f99 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -58,9 +58,9 @@ import java.util.function.Function; /** - * Base Aggregation Builder for geohash_grid and geotile_grid aggs + * Base Aggregation Builder for geogrid aggs * - * @opensearch.internal + * @opensearch.api */ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { /* recognized field names in JSON */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 909772c61a960..db07ac8f947e5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -55,16 +55,16 @@ /** * Aggregates data expressed as longs (for efficiency's sake) but formats results as aggregation-specific strings. * - * @opensearch.internal + * @opensearch.api */ -public abstract class GeoGridAggregator extends BucketsAggregator { +public abstract class GeoGridAggregator extends BucketsAggregator { protected final int requiredSize; protected final int shardSize; protected final ValuesSource.Numeric valuesSource; protected final LongKeyedBucketOrds bucketOrds; - GeoGridAggregator( + protected GeoGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -118,23 +118,23 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } - abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); + protected abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); /** * This method is used to return a re-usable instance of the bucket when building * the aggregation. 
- * @return a new {@link InternalGeoGridBucket} implementation with empty parameters + * @return a new {@link BaseGeoGridBucket} implementation with empty parameters */ - abstract InternalGeoGridBucket newEmptyBucket(); + protected abstract BaseGeoGridBucket newEmptyBucket(); @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalGeoGridBucket[][] topBucketsPerOrd = new InternalGeoGridBucket[owningBucketOrds.length][]; + BaseGeoGridBucket[][] topBucketsPerOrd = new BaseGeoGridBucket[owningBucketOrds.length][]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - InternalGeoGridBucket spare = null; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + BaseGeoGridBucket spare = null; LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); while (ordsEnum.next()) { if (spare == null) { @@ -149,7 +149,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I spare = ordered.insertWithOverflow(spare); } - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[ordered.size()]; + topBucketsPerOrd[ordIdx] = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; --i) { topBucketsPerOrd[ordIdx][i] = ordered.pop(); } @@ -163,7 +163,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } @Override - public InternalGeoGrid buildEmptyAggregation() { + public BaseGeoGrid buildEmptyAggregation() { return buildAggregation(name, requiredSize, Collections.emptyList(), metadata()); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java index ff1247300939a..aa1d5504ad24f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoHashGrid extends InternalGeoGrid { +public class GeoHashGrid extends BaseGeoGrid { - InternalGeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoHashGrid(StreamInput in) throws IOException { + public GeoHashGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoHashGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoHashGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoHashGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index bbaf9613fb216..760d7d643c0a5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geohash_grid * - * @opensearch.internal + * @opensearch.api */ public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geohash_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 6ca7a4d8a9cb8..9ff9fe7d8f9ba 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -47,9 +47,9 @@ * * @opensearch.internal */ -public class GeoHashGridAggregator extends GeoGridAggregator { +class GeoHashGridAggregator extends GeoGridAggregator { - public GeoHashGridAggregator( + GeoHashGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -64,16 +64,17 @@ public GeoHashGridAggregator( } @Override - InternalGeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected GeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoHashGrid buildEmptyAggregation() { - return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoHashGrid buildEmptyAggregation() { + return new GeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoHashGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 1914c07e831f7..898a7d82a4dec 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -58,7 +58,7 @@ * * @opensearch.internal */ -public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -86,7 +86,7 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, emptyList(), metadata); + final InternalAggregation aggregation = new GeoHashGrid(name, requiredSize, emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java index fa544b5893f0c..91c523c80855e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoTileGrid extends InternalGeoGrid { +public class GeoTileGrid extends BaseGeoGrid { - InternalGeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoTileGrid(StreamInput in) throws IOException { + public GeoTileGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoTileGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 76ad515f34fe5..0f1f87bdc57fa 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geotile_grid agg * - * @opensearch.internal + * @opensearch.api */ public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geotile_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index a205a9afde41e..8faed4e9cd2d4 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -48,9 +48,9 @@ * * @opensearch.internal */ -public class GeoTileGridAggregator extends GeoGridAggregator { +class GeoTileGridAggregator extends GeoGridAggregator { - public GeoTileGridAggregator( + GeoTileGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -65,16 +65,17 @@ public GeoTileGridAggregator( } @Override - InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected GeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoTileGrid buildEmptyAggregation() { - return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoTileGrid buildEmptyAggregation() { + return new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoTileGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index b830988a3d410..6eb73727ad6c8 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -85,7 +85,7 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); + final InternalAggregation aggregation = new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index 659909e868651..6e7ed8a679681 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoHashGridBucket extends BaseGeoGridBucket { InternalGeoHashGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -51,7 +51,7 @@ public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoTileGridBucket extends BaseGeoGridBucket { InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -52,7 +52,7 @@ public class InternalGeoTileGridBucket extends InternalGeoGridBucket implements GeoGrid { @@ -63,7 +63,7 @@ public static ObjectParser createParser( return parser; } - protected void setName(String name) { + public void setName(String name) { super.setName(name); } } diff --git 
a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 80124cda50b19..cbe3a2ee89dd7 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -40,7 +40,7 @@ /** * A single geo grid bucket result parsed between nodes * - * @opensearch.internal + * @opensearch.api */ public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index 109524e755c4d..343149f8e19ab 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGrid extends ParsedGeoGrid { +class ParsedGeoHashGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoHashGrid::new, diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 4e6e454b08324..6704273f45580 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { +class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { @Override public GeoPoint getKey() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 8734c96a15578..cb64a0e153e87 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoTileGrid extends ParsedGeoGrid { +class ParsedGeoTileGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoTileGrid::new, diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java index 3c7c292f9d193..bc7fde8d66d0a 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -17,7 +17,6 @@ import org.opensearch.common.geo.GeoPoint; import 
org.opensearch.geo.GeoModulePlugin; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -31,7 +30,7 @@ import java.util.Map; /** - * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation. + * Testing the geo tile grid as part of CompositeAggregation. */ public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index d6153637f656d..5ec10a7f4f7cf 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -73,7 +73,7 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { +public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { private static final String FIELD_NAME = "location"; protected static final double GEOHASH_TOLERANCE = 1E-5D; @@ -201,9 +201,9 @@ public void testAsSubAgg() throws IOException { Consumer verify = (terms) -> { Map> actual = new TreeMap<>(); for (StringTerms.Bucket tb : terms.getBuckets()) { - InternalGeoGrid gg = tb.getAggregations().get("gg"); + BaseGeoGrid gg = tb.getAggregations().get("gg"); Map sub = new TreeMap<>(); - for (InternalGeoGridBucket ggb : gg.getBuckets()) { + for (BaseGeoGridBucket ggb : gg.getBuckets()) { sub.put(ggb.getKeyAsString(), ggb.getDocCount()); } actual.put(tb.getKeyAsString(), sub); @@ -299,7 +299,7 @@ private void testCase( String field, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex ) throws IOException { testCase(query, precision, geoBoundingBox, verify, buildIndex, createBuilder("_name").field(field)); @@ -309,7 +309,7 @@ private void testCase( Query query, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex, GeoGridAggregationBuilder aggregationBuilder ) throws IOException { @@ -333,7 +333,7 @@ private void testCase( aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((InternalGeoGrid) aggregator.buildTopLevel()); + verify.accept((BaseGeoGrid) aggregator.buildTopLevel()); indexReader.close(); directory.close(); diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index 432736a2b43fe..2a655239997b6 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -50,16 +50,16 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridTestCase> extends - InternalMultiBucketAggregationTestCase { +public abstract class GeoGridTestCase> extends 
InternalMultiBucketAggregationTestCase< + T> { /** - * Instantiate a {@link InternalGeoGrid}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGrid}-derived class using the same parameters as constructor. */ - protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); + protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); /** - * Instantiate a {@link InternalGeoGridBucket}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGridBucket}-derived class using the same parameters as constructor. */ protected abstract B createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations); @@ -117,7 +117,7 @@ protected List getNamedXContents() { protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); @@ -176,7 +176,7 @@ protected Class implementationClass() { protected T mutateInstance(T instance) { String name = instance.getName(); int size = instance.getRequiredSize(); - List buckets = instance.getBuckets(); + List buckets = instance.getBuckets(); Map metadata = instance.getMetadata(); switch (between(0, 3)) { case 0: @@ -206,7 +206,7 @@ protected T mutateInstance(T instance) { } public void testCreateFromBuckets() { - InternalGeoGrid original = createTestInstance(); + BaseGeoGrid original = createTestInstance(); assertThat(original, equalTo(original.create(original.buckets))); } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index c84c6ef5ec076..ada943b6dd369 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoHashGridTests extends GeoGridTestCase { +public class GeoHashGridTests extends GeoGridTestCase { @Override - protected InternalGeoHashGrid createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoHashGrid(name, size, buckets, metadata); + protected GeoHashGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoHashGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index ead67e0455d94..b59e9ec2cff53 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoTileGridTests extends GeoGridTestCase { +public class GeoTileGridTests extends GeoGridTestCase { @Override - protected InternalGeoTileGrid 
createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoTileGrid(name, size, buckets, metadata); + protected GeoTileGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoTileGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c0d7e51047c6b..706c73e7416f5 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -10,8 +10,8 @@ import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -24,14 +24,14 @@ public static GeoBoundsAggregationBuilder geoBounds(String name) { } /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + * Create a new {@link GeoHashGrid} aggregation with the given name. */ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + * Create a new {@link GeoTileGrid} aggregation with the given name. */ public static GeoTileGridAggregationBuilder geotileGrid(String name) { return new GeoTileGridAggregationBuilder(name); diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 3473cf2d94b76..89debdf5abd95 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,7 +8,7 @@ package org.opensearch.geo.tests.common; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.BaseGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -17,7 +17,7 @@ public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } - public static boolean hasValue(InternalGeoGrid agg) { + public static boolean hasValue(BaseGeoGrid agg) { return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); } } From ef45809468ff381498c69a8b0af07e7443967836 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 08:44:54 -0500 Subject: [PATCH 02/39] [Remove] LegacyESVersion.V_7_0_* and V_7_1_* constants (#2768) * [Remove] LegacyESVersion.V_7_0_* and V_7_1_* constants Removes all usages of LegacyESVersion.V_7_0_ and LegacyESVersion.V_7_1 version constants. 
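The edit pattern is mechanical throughout: wire-format guards against the removed versions collapse into the unconditional 7.x code path. As an editorial illustration (a sketch for the reader, not part of the applied diff), the MultiSearchTemplateResponse hunk later in this patch reduces to the following before/after, copied from that hunk:

    // Before: the read was guarded because pre-7.0 peers did not send a took time,
    // so readers on newer wires substituted -1.
    MultiSearchTemplateResponse(StreamInput in) throws IOException {
        super(in);
        items = in.readArray(Item::new, Item[]::new);
        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
            tookInMillis = in.readVLong();
        } else {
            tookInMillis = -1L;
        }
    }

    // After: every wire version this codebase still talks to is past 7.0/7.1,
    // so the guard is always true and the field is read unconditionally.
    MultiSearchTemplateResponse(StreamInput in) throws IOException {
        super(in);
        items = in.readArray(Item::new, Item[]::new);
        tookInMillis = in.readVLong();
    }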
Signed-off-by: Nicholas Walter Knize * Rebase from main Signed-off-by: Nicholas Walter Knize * fix serialization issue with build flavor removal Signed-off-by: Nicholas Walter Knize * remove stale bwc test Signed-off-by: Nicholas Walter Knize * rebase and update Signed-off-by: Nicholas Walter Knize * cleanup Signed-off-by: Nicholas Walter Knize * fix failing mapper test Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../common/CommonAnalysisModulePlugin.java | 34 +-- .../common/NGramTokenFilterFactory.java | 29 +- .../mustache/MultiSearchTemplateResponse.java | 11 +- ...GoogleCloudStorageRetryingInputStream.java | 3 +- .../s3/S3RetryingInputStream.java | 3 +- .../upgrades/FullClusterRestartIT.java | 9 +- .../upgrades/QueryBuilderBWCIT.java | 3 +- .../upgrades/JodaCompatibilityIT.java | 282 ------------------ .../org/opensearch/upgrades/MappingIT.java | 70 ----- .../org/opensearch/upgrades/RecoveryIT.java | 9 +- .../upgrades/SystemIndicesUpgradeIT.java | 13 +- .../src/main/java/org/opensearch/Build.java | 41 +-- .../java/org/opensearch/LegacyESVersion.java | 4 - .../org/opensearch/OpenSearchException.java | 2 +- .../src/main/java/org/opensearch/Version.java | 2 +- .../cluster/state/ClusterStateResponse.java | 8 - .../indices/alias/IndicesAliasesRequest.java | 8 +- .../indices/create/CreateIndexRequest.java | 7 - .../TransportGetFieldMappingsIndexAction.java | 4 +- .../mapping/put/PutMappingRequest.java | 6 - .../action/delete/DeleteRequest.java | 7 - ...TransportFieldCapabilitiesIndexAction.java | 3 +- .../org/opensearch/action/get/GetRequest.java | 7 - .../action/get/MultiGetRequest.java | 7 - .../opensearch/action/index/IndexRequest.java | 6 - .../opensearch/action/main/MainResponse.java | 7 - .../action/search/MultiSearchResponse.java | 11 +- .../action/search/SearchRequest.java | 8 +- .../termvectors/TermVectorsRequest.java | 8 - .../action/update/UpdateRequest.java | 29 -- .../cluster/coordination/Coordinator.java | 9 +- .../cluster/metadata/MappingMetadata.java | 7 - .../opensearch/cluster/metadata/Metadata.java | 34 +-- .../metadata/MetadataCreateIndexService.java | 27 +- .../cluster/routing/UnassignedInfo.java | 6 +- .../java/org/opensearch/common/Rounding.java | 19 +- .../breaker/CircuitBreakingException.java | 11 +- .../java/org/opensearch/common/joda/Joda.java | 13 - .../org/opensearch/common/lucene/Lucene.java | 56 ++-- .../java/org/opensearch/env/NodeMetadata.java | 6 +- .../opensearch/gateway/GatewayMetaState.java | 5 +- .../index/analysis/AnalysisRegistry.java | 11 +- .../analysis/ShingleTokenFilterFactory.java | 47 +-- .../index/mapper/DateFieldMapper.java | 7 +- .../index/mapper/DocumentMapper.java | 7 +- .../index/mapper/DocumentMapperParser.java | 2 +- .../mapper/LegacyGeoShapeFieldMapper.java | 7 +- .../index/mapper/MapperService.java | 6 +- .../index/mapper/RangeFieldMapper.java | 8 +- .../index/query/MultiMatchQueryBuilder.java | 7 - .../index/similarity/SimilarityProviders.java | 48 +-- .../index/similarity/SimilarityService.java | 60 +--- .../opensearch/indices/IndicesService.java | 9 +- .../indices/analysis/AnalysisModule.java | 17 +- .../indices/mapper/MapperRegistry.java | 14 +- .../org/opensearch/monitor/jvm/JvmInfo.java | 16 +- .../org/opensearch/script/ScriptStats.java | 6 +- .../org/opensearch/search/DocValueFormat.java | 44 +-- .../pipeline/InternalPercentilesBucket.java | 12 +- ...tilesBucketPipelineAggregationBuilder.java | 11 +- .../PercentilesBucketPipelineAggregator.java | 11 +- 
.../BaseMultiValuesSourceFieldConfig.java | 13 +- .../ValuesSourceAggregationBuilder.java | 14 +- .../search/builder/SearchSourceBuilder.java | 12 +- .../search/dfs/DfsSearchResult.java | 28 +- .../search/internal/ShardSearchRequest.java | 10 +- .../opensearch/search/slice/SliceBuilder.java | 12 +- .../opensearch/search/suggest/Suggest.java | 65 +--- .../completion/context/GeoContextMapping.java | 38 +-- .../search/suggest/term/TermSuggestion.java | 10 +- .../opensearch/snapshots/RestoreService.java | 10 - .../transport/RemoteConnectionInfo.java | 71 +---- .../test/java/org/opensearch/BuildTests.java | 13 +- .../ExceptionSerializationTests.java | 2 +- .../java/org/opensearch/VersionTests.java | 2 +- .../state/ClusterStateRequestTests.java | 15 +- .../action/search/SearchRequestTests.java | 7 +- .../coordination/JoinTaskExecutorTests.java | 20 +- .../MetadataIndexStateServiceTests.java | 90 ------ .../common/settings/SettingsTests.java | 3 +- .../index/query/RangeQueryBuilderTests.java | 2 +- .../similarity/SimilarityServiceTests.java | 8 +- .../indices/IndicesModuleTests.java | 28 +- .../indices/IndicesServiceTests.java | 13 +- .../search/query/QuerySearchResultTests.java | 33 -- .../test/rest/yaml/section/DoSection.java | 13 - .../AbstractFullClusterRestartTestCase.java | 5 +- 88 files changed, 241 insertions(+), 1481 deletions(-) delete mode 100644 qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java delete mode 100644 qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 018475c7e9ae6..e1b23eaa5ac20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) ### Fixed diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 57865e15d523a..7cad01c5cc00a 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -485,19 +485,10 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [edge_ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "edgeNGram_deprecation", - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" - + "Please change the filter name to [edge_ngram] instead." - ); - } - return new EdgeNGramTokenFilter(reader, 1); + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); })); filters.add( PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)) @@ -524,19 +515,10 @@ public List getPreConfiguredTokenFilters() { ); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.openSearchVersion("nGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "nGram_deprecation", - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead." - ); - } - return new NGramTokenFilter(reader, 1, 2, false); + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." + ); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java index 218bb74b84667..a6adf680a454c 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.NGramTokenFilter; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -54,25 +53,15 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" - + maxAllowedNgramDiff - + "] but was [" - + ngramDiff - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "ngram_big_difference", - "Deprecated big difference between max_gram and min_gram in NGram Tokenizer," - + "expected difference must be less than or equal to: [" - + maxAllowedNgramDiff - + "]" - ); - } + throw new IllegalArgumentException( + "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + + maxAllowedNgramDiff + + "] but was [" + + ngramDiff + + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + + "] index level setting." + ); } preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index 1802d03e20942..7c2c403fdd487 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -32,7 +32,6 @@ package org.opensearch.script.mustache; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.search.MultiSearchResponse; @@ -125,11 +124,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = -1L; - } + tookInMillis = in.readVLong(); } MultiSearchTemplateResponse(Item[] items, long tookInMillis) { @@ -159,9 +154,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index 72d3e37466d09..5448799e7f81b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -42,7 +42,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.SpecialPermission; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.internal.io.IOUtils; @@ -61,7 +60,7 @@ /** * Wrapper around reads from GCS that will retry blob downloads that fail part-way through, resuming from where the failure occurred. * This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. 
*/ class GoogleCloudStorageRetryingInputStream extends InputStream { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 388f5b8d74a2b..f751d63232f79 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -40,7 +40,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.core.internal.io.IOUtils; import java.io.IOException; @@ -52,7 +51,7 @@ /** * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. * * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 714d8a252579f..0ed51b9d8a011 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -329,9 +329,6 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - shrinkIndexRequest.addParameter("copy_settings", "true"); - } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -1253,7 +1250,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { settings.startObject("settings"); settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); - if (randomBoolean() || getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { + if (randomBoolean()) { // this is the default after v7.0.0, but is required before that settings.field("soft_deletes.enabled", true); } @@ -1436,10 +1433,6 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index de042cb2b7634..856dc45d42203 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ 
b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -46,6 +46,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.DisMaxQueryBuilder; @@ -157,7 +158,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { - final String type = getOldClusterVersion().before(LegacyESVersion.V_7_0_0) ? "doc" : "_doc"; + final String type = MapperService.SINGLE_MAPPING_NAME; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java deleted file mode 100644 index 0ef1e3a5050af..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.apache.http.HttpStatus; -import org.apache.http.util.EntityUtils; -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Node; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.Booleans; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.DocValueFormat; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; - -/** - * This is test is meant to verify that when upgrading from 6.x version to 7.7 or newer it is able to parse date fields with joda pattern. - * - * The test is indexing documents and searches with use of joda or java pattern. - * In order to make sure that serialization logic is used a search call is executed 3 times (using all nodes). 
- * It cannot be guaranteed that serialization logic will always be used as it might happen that - * all shards are allocated on the same node and client is connecting to it. - * Because of this warnings assertions have to be ignored. - * - * A special flag used when serializing {@link DocValueFormat.DateTime#writeTo DocValueFormat.DateTime::writeTo} - * is used to indicate that an index was created in 6.x and has a joda pattern. The same flag is read when - * {@link DocValueFormat.DateTime#DateTime(StreamInput)} deserializing. - * When upgrading from 7.0-7.6 to 7.7 there is no way to tell if a pattern was created in 6.x as this flag cannot be added. - * Hence a skip assume section in init() - * - * @see org.opensearch.search.DocValueFormat.DateTime - */ -public class JodaCompatibilityIT extends AbstractRollingTestCase { - - @BeforeClass - public static void init(){ - assumeTrue("upgrading from 7.0-7.6 will fail parsing joda formats", - UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - } - - public void testJodaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("joda_time", "YYYY-MM-dd'T'HH:mm:ssZZ"); - createTestIndex.setOptions(ignoreWarnings()); - - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("joda_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 2 : 3; - postNewDoc("joda_time", minute); - - Request search = dateRangeSearch("joda_time"); - search.setOptions(ignoreWarnings()); - - performOnAllNodes(search, r -> assertEquals(HttpStatus.SC_OK, r.getStatusLine().getStatusCode())); - break; - case UPGRADED: - postNewDoc("joda_time", 4); - - search = searchWithAgg("joda_time"); - search.setOptions(ignoreWarnings()); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - break; - } - } - - public void testJavaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("java_time", "8yyyy-MM-dd'T'HH:mm:ssXXX"); - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("java_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 
2 : 3; - postNewDoc("java_time", minute); - - Request search = dateRangeSearch("java_time"); - Response searchResp = client().performRequest(search); - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - break; - case UPGRADED: - postNewDoc("java_time", 4); - - search = searchWithAgg("java_time"); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - - break; - } - } - - private RequestOptions ignoreWarnings() { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - return options.build(); - } - - private void performOnAllNodes(Request search, Consumer consumer) throws IOException { - List nodes = client().getNodes(); - for (Node node : nodes) { - client().setNodes(Collections.singletonList(node)); - Response response = client().performRequest(search); - consumer.accept(response); - assertEquals(HttpStatus.SC_OK, response.getStatusLine().getStatusCode()); - } - client().setNodes(nodes); - } - - private void assertResponseHasAllDocuments(Response searchResp) { - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - try { - assertEquals(removeWhiteSpace("{" + - " \"_shards\": {" + - " \"total\": 3," + - " \"successful\": 3" + - " },"+ - " \"hits\": {" + - " \"total\": 4," + - " \"hits\": [" + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:01+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:02+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:03+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:04+01:00\"" + - " }" + - " }" + - " ]" + - " }" + - "}"), - EntityUtils.toString(searchResp.getEntity(), StandardCharsets.UTF_8)); - } catch (IOException e) { - throw new AssertionError("Exception during response parising", e); - } - } - - private String removeWhiteSpace(String input) { - return input.replaceAll("[\\n\\r\\t\\ ]", ""); - } - - private Request dateRangeSearch(String endpoint) { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - search.setJsonEntity("" + - "{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return search; - } - - private Request searchWithAgg(String endpoint) throws IOException { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - - search.setJsonEntity("{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " },\n" + - " \"aggs\" : {\n" + - " \"docs_per_year\" : {\n" + - " \"date_histogram\" : {\n" + - " \"field\" : \"date\",\n" + - " \"calendar_interval\" : \"year\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return 
search; - } - private Request indexWithDateField(String indexName, String format) { - Request createTestIndex = new Request("PUT", indexName); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity("{\n" + - " \"settings\": {\n" + - " \"index.number_of_shards\": 3\n" + - " },\n" + - " \"mappings\": {\n" + - " \"properties\": {\n" + - " \"datetime\": {\n" + - " \"type\": \"date\",\n" + - " \"format\": \"" + format + "\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - return createTestIndex; - } - - private void postNewDoc(String endpoint, int minute) throws IOException { - Request putDoc = new Request("POST", endpoint+"/_doc"); - putDoc.addParameter("refresh", "true"); - putDoc.addParameter("wait_for_active_shards", "all"); - putDoc.setJsonEntity("{\n" + - " \"datetime\": \"2020-01-01T00:00:0" + minute + "+01:00\"\n" + - "}" - ); - Response resp = client().performRequest(putDoc); - assertEquals(HttpStatus.SC_CREATED, resp.getStatusLine().getStatusCode()); - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java deleted file mode 100644 index 07b1d67fde7ff..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.support.XContentMapValues; - -public class MappingIT extends AbstractRollingTestCase { - /** - * Create a mapping that explicitly disables the _all field (possible in 6x, see #37429) - * and check that it can be upgraded to 7x. - */ - public void testAllFieldDisable6x() throws Exception { - assumeTrue("_all", UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = new Request("PUT", "all-index"); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity( - "{ \"settings\": { \"index.number_of_shards\": 1 }, " + - "\"mappings\": {\"_all\": { \"enabled\": false }, \"properties\": { \"field\": { \"type\": \"text\" }}}}" - ); - createTestIndex.setOptions(expectWarnings("[_all] is deprecated in 6.0+ and will be removed in 7.0. 
As a replacement," + - " " + "you can use [copy_to] on mapping fields to create your own catch all field.")); - Response resp = client().performRequest(createTestIndex); - assertEquals(200, resp.getStatusLine().getStatusCode()); - break; - - default: - final Request request = new Request("GET", "all-index"); - Response response = client().performRequest(request); - assertEquals(200, response.getStatusLine().getStatusCode()); - Object enabled = XContentMapValues.extractValue("all-index.mappings._all.enabled", entityAsMap(response)); - assertNotNull(enabled); - assertEquals(false, enabled); - break; - } - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index cbf91fa9d71e7..3d71f4a198aac 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -47,6 +47,7 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ObjectPath; import org.hamcrest.Matcher; @@ -244,6 +245,7 @@ private String getNodeId(Predicate versionPredicate) throws IOException if (versionPredicate.test(version)) { return id; } + return id; } return null; } @@ -270,6 +272,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); break; case MIXED: + // todo: verify this test can be removed in 3.0.0 final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is placed on the old node @@ -348,11 +351,7 @@ public void testRecovery() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - if (getNodeId(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)) == null) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); - } else { - client().performRequest(new Request("DELETE", index + "/_doc/" + i)); - } + client().performRequest(new Request("DELETE", index + "/" + MapperService.SINGLE_MAPPING_NAME + "/" + i)); } } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index c50af0084b000..634dc0628f27a 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -59,13 +59,8 @@ public void testSystemIndicesUpgrades() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\", \"_type\" : \"_doc\"}}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } else { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } + bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); // start a async reindex job 
@@ -91,10 +86,6 @@ public void testSystemIndicesUpgrades() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/server/src/main/java/org/opensearch/Build.java b/server/src/main/java/org/opensearch/Build.java index 364b17ad4aa33..13c951b10cfe3 100644 --- a/server/src/main/java/org/opensearch/Build.java +++ b/server/src/main/java/org/opensearch/Build.java @@ -207,58 +207,27 @@ public String date() { } public static Build readBuild(StreamInput in) throws IOException { - final String distribution; - final Type type; // the following is new for opensearch: we write the distribution to support any "forks" - if (in.getVersion().onOrAfter(Version.V_1_0_0)) { - distribution = in.readString(); - } else { - distribution = "other"; - } - - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. - // TODO - clean this up when OSS flavor is removed in all of the code base - // (Integ test zip still write OSS as distribution) - // See issue: https://github.com/opendistro-for-elasticsearch/search/issues/159 - if (in.getVersion().before(Version.V_1_3_0)) { - String flavor = in.readString(); - } + final String distribution = in.readString(); // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); + final Type type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - - final String version; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + final String version = in.readString(); return new Build(type, hash, date, snapshot, version, distribution); } public static void writeBuild(Build build, StreamOutput out) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code - if (out.getVersion().onOrAfter(Version.V_1_0_0)) { - out.writeString(build.distribution); - } + out.writeString(build.distribution); - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. 
- // TODO - clean this up when OSS flavor is removed in all of the code base - if (out.getVersion().before(Version.V_1_3_0)) { - out.writeString("oss"); - } final Type buildType = build.type(); out.writeString(buildType.displayName()); out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index d4ac3c7d2f8b1..1eb22a6bef3b5 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,10 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_0_0 = new LegacyESVersion(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_0_1 = new LegacyESVersion(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_0 = new LegacyESVersion(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_1 = new LegacyESVersion(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4b6ca173ec692..17ece23f819a2 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -1533,7 +1533,7 @@ private enum OpenSearchExceptionHandle { org.opensearch.cluster.coordination.CoordinationStateRejectedException.class, org.opensearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - LegacyESVersion.V_7_0_0 + UNKNOWN_VERSION_ADDED ), SNAPSHOT_IN_PROGRESS_EXCEPTION( org.opensearch.snapshots.SnapshotInProgressException.class, diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 1bffe9ec98ec5..3387eee2dffc8 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -412,7 +412,7 @@ private Version computeMinIndexCompatVersion() { } else if (major == 7 || major == 1) { return LegacyESVersion.fromId(6000026); } else if (major == 2) { - return LegacyESVersion.V_7_0_0; + return LegacyESVersion.fromId(7000099); } else { bwcMajor = major - 1; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index 89cd112d30c79..d2d7d843e19db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -32,14 +32,12 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import 
org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -59,9 +57,6 @@ public ClusterStateResponse(StreamInput in) throws IOException { super(in); clusterName = new ClusterName(in); clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - new ByteSizeValue(in); - } waitForTimedOut = in.readBoolean(); } @@ -98,9 +93,6 @@ public boolean isWaitForTimedOut() { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeOptionalWriteable(clusterState); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - ByteSizeValue.ZERO.writeTo(out); - } out.writeBoolean(waitForTimedOut); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 62f51aa3f3bff..eb2d2706a6531 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -284,9 +284,7 @@ public AliasActions(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { isHidden = in.readOptionalBoolean(); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - originalAliases = in.readStringArray(); - } + originalAliases = in.readStringArray(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { mustExist = in.readOptionalBoolean(); } else { @@ -308,9 +306,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { out.writeOptionalBoolean(isHidden); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(originalAliases); - } + out.writeStringArray(originalAliases); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeOptionalBoolean(mustExist); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 95837d82be7ac..302c2aad64bb4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; @@ -126,9 +125,6 @@ public CreateIndexRequest(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(new Alias(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -505,9 +501,6 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } waitForActiveShards.writeTo(out); } } diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 64f76db5e1549..6d238a385231f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -120,8 +119,7 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - Version indexCreatedVersion = indexService.mapperService().getIndexSettings().getIndexVersionCreated(); - Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(indexCreatedVersion, f); + Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(f); Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); DocumentMapper documentMapper = indexService.mapperService().documentMapper(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 85fd74f0762a5..a8eeedd4a3e4c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -117,9 +117,6 @@ public PutMappingRequest(StreamInput in) throws IOException { } } source = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { @@ -349,9 +346,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index ce723df0c383a..86880c0211c1d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; @@ -96,9 +95,6 @@ public 
DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); ifSeqNo = in.readZLong(); @@ -280,9 +276,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeLong(version); out.writeByte(versionType.getValue()); out.writeZLong(ifSeqNo); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 99962741299ca..7d9ab4ff93f59 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -153,8 +153,7 @@ private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesInd for (String field : fieldNames) { MappedFieldType ft = mapperService.fieldType(field); if (ft != null) { - if (indicesService.isMetadataField(mapperService.getIndexSettings().getIndexVersionCreated(), field) - || fieldPredicate.test(ft.name())) { + if (indicesService.isMetadataField(field) || fieldPredicate.test(ft.name())) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, ft.familyTypeName(), diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 5f740ba789bb2..64148f070cc16 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; @@ -89,9 +88,6 @@ public GetRequest() {} } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); - } preference = in.readOptionalString(); refresh = in.readBoolean(); storedFields = in.readOptionalStringArray(); @@ -260,9 +256,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); - } out.writeOptionalString(preference); out.writeBoolean(refresh); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 00df8657736ae..91f506dafafe1 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; @@ -114,9 +113,6 @@ public Item(StreamInput in) throws IOException { } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - 
in.readOptionalString(); // _parent - } storedFields = in.readOptionalStringArray(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); @@ -211,9 +207,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalStringArray(storedFields); out.writeLong(version); out.writeByte(versionType.getValue()); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index f863c4a11340e..381eca2dc716f 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -148,9 +148,6 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } id = in.readOptionalString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); version = in.readLong(); @@ -669,9 +666,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeOptionalString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeBytesReference(source); out.writeByte(opType.getId()); out.writeLong(version); diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index 691bbda512275..0fbfdab9ba294 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; @@ -71,9 +70,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { clusterName = new ClusterName(in); clusterUuid = in.readString(); build = Build.readBuild(in); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); - } } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -111,9 +107,6 @@ public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); - } } @Override diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java index 6c25a16a65c75..c4ba3becbc151 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionResponse; @@ -147,11 +146,7 @@ public MultiSearchResponse(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = new Item(in); } - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = 0L; - } + tookInMillis = in.readVLong(); } public MultiSearchResponse(Item[] items, long tookInMillis) { @@ -184,9 +179,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index da34dab6383d9..e4dd0d0b1a116 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -251,9 +251,7 @@ public SearchRequest(StreamInput in) throws IOException { absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; finalReduce = true; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - ccsMinimizeRoundtrips = in.readBoolean(); - } + ccsMinimizeRoundtrips = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_1_0)) { cancelAfterTimeInterval = in.readOptionalTimeValue(); @@ -288,9 +286,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(absoluteStartMillis); out.writeBoolean(finalReduce); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(ccsMinimizeRoundtrips); - } + out.writeBoolean(ccsMinimizeRoundtrips); if (out.getVersion().onOrAfter(Version.V_1_1_0)) { out.writeOptionalTimeValue(cancelAfterTimeInterval); diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index cf2d10d2f1db3..dcd5feda0004a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.termvectors; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; @@ -189,10 +188,6 @@ public TermVectorsRequest() {} xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } preference = in.readOptionalString(); long flags = in.readVLong(); @@ -541,9 +536,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(xContentType); } out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalString(preference); long longFlags = 0; for (Flag flag : flagsEnum) { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index d434f134f4321..abd3c31597c18 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -170,9 +170,6 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } if (in.readBoolean()) { script = 
new Script(in); } @@ -181,26 +178,11 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti if (in.readBoolean()) { doc = new IndexRequest(shardId, in); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - String[] fields = in.readOptionalStringArray(); - if (fields != null) { - throw new IllegalArgumentException("[fields] is no longer supported"); - } - } fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { upsertRequest = new IndexRequest(shardId, in); } docAsUpsert = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - long version = in.readLong(); - VersionType versionType = VersionType.readFromStream(in); - if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { - throw new UnsupportedOperationException( - "versioned update requests have been removed in 7.0. Use if_seq_no and if_primary_term" - ); - } - } ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); detectNoop = in.readBoolean(); @@ -893,10 +875,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } - boolean hasScript = script != null; out.writeBoolean(hasScript); if (hasScript) { @@ -917,9 +895,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { doc.writeTo(out); } } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalStringArray(null); - } out.writeOptionalWriteable(fetchSourceContext); if (upsertRequest == null) { out.writeBoolean(false); @@ -935,10 +910,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } } out.writeBoolean(docAsUpsert); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeLong(Versions.MATCH_ANY); - out.writeByte(VersionType.INTERNAL.getValue()); - } out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); out.writeBoolean(detectNoop); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1c7e7cd0419e2..7ac716084793d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; @@ -1771,14 +1770,8 @@ protected void sendApplyCommit( } } - // TODO: only here temporarily for BWC development, remove once complete - public static Settings.Builder addZen1Attribute(boolean isZen1Node, Settings.Builder builder) { - return builder.put("node.attr.zen1", isZen1Node); - } - // TODO: only here temporarily for BWC development, remove once complete public static boolean isZen1Node(DiscoveryNode discoveryNode) { - return discoveryNode.getVersion().before(LegacyESVersion.V_7_0_0) - || (Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false"))); + return Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false")); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java 
b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 35ee222541771..223127783621e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -161,9 +160,6 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); // routing out.writeBoolean(routingRequired); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(false); // hasParentField - } } @Override @@ -190,9 +186,6 @@ public MappingMetadata(StreamInput in) throws IOException { source = CompressedXContent.readCompressedString(in); // routing routingRequired = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // hasParentField - } } public static Diff readDiffFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index eb5e8bbc2d49b..fb8123f20e904 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -981,15 +981,9 @@ private static class MetadataDiff implements Diff { MetadataDiff(StreamInput in) throws IOException { clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - clusterUUIDCommitted = in.readBoolean(); - } + clusterUUIDCommitted = in.readBoolean(); version = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata = new CoordinationMetadata(in); - } else { - coordinationMetadata = CoordinationMetadata.EMPTY_METADATA; - } + coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1005,13 +999,9 @@ private static class MetadataDiff implements Diff { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } + out.writeBoolean(clusterUUIDCommitted); out.writeLong(version); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + coordinationMetadata.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1043,12 +1033,8 @@ public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); builder.clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.clusterUUIDCommitted = in.readBoolean(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.coordinationMetadata(new CoordinationMetadata(in)); - } + builder.clusterUUIDCommitted = in.readBoolean(); + builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); if 
         if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
@@ -1074,12 +1060,8 @@ public static Metadata readFrom(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(version);
         out.writeString(clusterUUID);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(clusterUUIDCommitted);
-        }
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            coordinationMetadata.writeTo(out);
-        }
+        out.writeBoolean(clusterUUIDCommitted);
+        coordinationMetadata.writeTo(out);
         writeSettingsToStream(transientSettings, out);
         writeSettingsToStream(persistentSettings, out);
         if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index d78e5e872fd2b..36e25b5458b76 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -36,7 +36,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.ResourceAlreadyExistsException;
 import org.opensearch.Version;
@@ -1385,21 +1384,17 @@ static void prepareResizeIndexSettings(
      * the less default split operations are supported
      */
     public static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) {
-        if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_0_0)) {
-            // only select this automatically for indices that are created on or after 7.0 this will prevent this new behaviour
-            // until we have a fully upgraded cluster. Additionally it will make integratin testing easier since mixed clusters
-            // will always have the behavior of the min node in the cluster.
-            //
-            // We use as a default number of routing shards the higher number that can be expressed
-            // as {@code numShards * 2^x`} that is less than or equal to the maximum number of shards: 1024.
-            int log2MaxNumShards = 10; // logBase2(1024)
-            int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards))
-            int numSplits = log2MaxNumShards - log2NumShards;
-            numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once
-            return numShards * 1 << numSplits;
-        } else {
-            return numShards;
-        }
+        // only select this automatically for indices that are created on or after 7.0; this will prevent this new behaviour
+        // until we have a fully upgraded cluster. Additionally, it will make integration testing easier since mixed clusters
+        // will always have the behavior of the min node in the cluster.
+        //
+        // We use as a default number of routing shards the higher number that can be expressed
+        // as {@code numShards * 2^x} that is less than or equal to the maximum number of shards: 1024.
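+        // Worked example (illustrative comment, not part of the original change; assumes numShards = 5):
+        // log2NumShards = ceil(log2(5)) = 3, so numSplits = 10 - 3 = 7 and the default
+        // number of routing shards is 5 * 2^7 = 640, the largest value 5 * 2^x <= 1024.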
+        int log2MaxNumShards = 10; // logBase2(1024)
+        int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards))
+        int numSplits = log2MaxNumShards - log2NumShards;
+        numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once
+        return numShards * 1 << numSplits;
     }
     public static void validateTranslogRetentionSettings(Settings indexSettings) {
diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
index 49c18fe44cc04..489c6125f7d13 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
@@ -322,11 +322,7 @@ public UnassignedInfo(StreamInput in) throws IOException {
     }
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().before(LegacyESVersion.V_7_0_0) && reason == Reason.INDEX_CLOSED) {
-            out.writeByte((byte) Reason.REINITIALIZED.ordinal());
-        } else {
-            out.writeByte((byte) reason.ordinal());
-        }
+        out.writeByte((byte) reason.ordinal());
         out.writeLong(unassignedTimeMillis);
         // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs
         out.writeBoolean(delayed);
diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java
index c396f6c88fd57..7160cb1e6d233 100644
--- a/server/src/main/java/org/opensearch/common/Rounding.java
+++ b/server/src/main/java/org/opensearch/common/Rounding.java
@@ -459,20 +459,13 @@ static class TimeUnitRounding extends Rounding {
         }
         TimeUnitRounding(StreamInput in) throws IOException {
-            this(
-                DateTimeUnit.resolve(in.readByte()),
-                in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readZoneId() : DateUtils.of(in.readString())
-            );
+            this(DateTimeUnit.resolve(in.readByte()), in.readZoneId());
         }
         @Override
         public void innerWriteTo(StreamOutput out) throws IOException {
             out.writeByte(unit.getId());
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-                out.writeZoneId(timeZone);
-            } else {
-                out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID());
-            }
+            out.writeZoneId(timeZone);
         }
         @Override
@@ -924,17 +917,13 @@ static class TimeIntervalRounding extends Rounding {
         }
         TimeIntervalRounding(StreamInput in) throws IOException {
-            this(in.readVLong(), in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readZoneId() : DateUtils.of(in.readString()));
+            this(in.readVLong(), in.readZoneId());
         }
         @Override
         public void innerWriteTo(StreamOutput out) throws IOException {
             out.writeVLong(interval);
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-                out.writeZoneId(timeZone);
-            } else {
-                out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID());
-            }
+            out.writeZoneId(timeZone);
         }
         @Override
diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java
index ee9977bfa2eb0..fda089cf26942 100644
--- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java
+++ b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java
@@ -31,7 +31,6 @@
 package org.opensearch.common.breaker;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
@@ -55,11 +54,7 @@ public CircuitBreakingException(StreamInput in) throws IOException {
         super(in);
         byteLimit = in.readLong();
         bytesWanted = in.readLong();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            durability = in.readEnum(CircuitBreaker.Durability.class);
-        } else {
-            durability = CircuitBreaker.Durability.PERMANENT;
-        }
+        durability = in.readEnum(CircuitBreaker.Durability.class);
     }
     public CircuitBreakingException(String message, CircuitBreaker.Durability durability) {
@@ -78,9 +73,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeLong(byteLimit);
         out.writeLong(bytesWanted);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeEnum(durability);
-        }
+        out.writeEnum(durability);
     }
     public long getBytesWanted() {
diff --git a/server/src/main/java/org/opensearch/common/joda/Joda.java b/server/src/main/java/org/opensearch/common/joda/Joda.java
index 9ecb3f2236e7c..7a82b8ce49d21 100644
--- a/server/src/main/java/org/opensearch/common/joda/Joda.java
+++ b/server/src/main/java/org/opensearch/common/joda/Joda.java
@@ -32,8 +32,6 @@
 package org.opensearch.common.joda;
-import org.opensearch.LegacyESVersion;
-import org.opensearch.Version;
 import org.opensearch.common.Strings;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.time.DateFormatter;
@@ -388,17 +386,6 @@ public DateTimeField getField(Chronology chronology) {
         }
     };
-    /**
-     * Checks if a pattern is Joda-style.
-     * Joda style patterns are not always compatible with java.time patterns.
-     * @param version - creation version of the index where pattern was used
-     * @param pattern - the pattern to check
-     * @return - true if pattern is joda style, otherwise false
-     */
-    public static boolean isJodaPattern(Version version, String pattern) {
-        return version.before(LegacyESVersion.V_7_0_0) && pattern.startsWith("8") == false;
-    }
-
     /**
      * parses epcoch timers
     *
diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java
index 2692a8fa2b914..7b69dff020bc4 100644
--- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java
+++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java
@@ -97,7 +97,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Version;
 import org.opensearch.ExceptionsHelper;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.Strings;
 import org.opensearch.common.SuppressForbidden;
@@ -322,10 +321,7 @@ public static boolean exists(IndexSearcher searcher, Query query) throws IOExcep
     public static TotalHits readTotalHits(StreamInput in) throws IOException {
         long totalHits = in.readVLong();
-        TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO;
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            totalHitsRelation = in.readEnum(TotalHits.Relation.class);
-        }
+        TotalHits.Relation totalHitsRelation = in.readEnum(TotalHits.Relation.class);
         return new TotalHits(totalHits, totalHitsRelation);
     }
@@ -444,11 +440,7 @@ public static ScoreDoc readScoreDoc(StreamInput in) throws IOException {
     public static void writeTotalHits(StreamOutput out, TotalHits totalHits) throws IOException {
         out.writeVLong(totalHits.value);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeEnum(totalHits.relation);
-        } else if (totalHits.value > 0 && totalHits.relation != TotalHits.Relation.EQUAL_TO) {
-            throw new IllegalArgumentException("Cannot serialize approximate total hit counts to nodes that are on a version < 7.0.0");
-        }
+        out.writeEnum(totalHits.relation);
     }
     public static void writeTopDocs(StreamOutput out, TopDocsAndMaxScore topDocs) throws IOException {
@@ -648,20 +640,16 @@ public static void writeSortField(StreamOutput out, SortField sortField) throws
     }
     private static Number readExplanationValue(StreamInput in) throws IOException {
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            final int numberType = in.readByte();
-            switch (numberType) {
-                case 0:
-                    return in.readFloat();
-                case 1:
-                    return in.readDouble();
-                case 2:
-                    return in.readZLong();
-                default:
-                    throw new IOException("Unexpected number type: " + numberType);
-            }
-        } else {
-            return in.readFloat();
+        final int numberType = in.readByte();
+        switch (numberType) {
+            case 0:
+                return in.readFloat();
+            case 1:
+                return in.readDouble();
+            case 2:
+                return in.readZLong();
+            default:
+                throw new IOException("Unexpected number type: " + numberType);
         }
     }
@@ -680,19 +668,15 @@ public static Explanation readExplanation(StreamInput in) throws IOException {
     }
     private static void writeExplanationValue(StreamOutput out, Number value) throws IOException {
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            if (value instanceof Float) {
-                out.writeByte((byte) 0);
-                out.writeFloat(value.floatValue());
-            } else if (value instanceof Double) {
-                out.writeByte((byte) 1);
-                out.writeDouble(value.doubleValue());
-            } else {
-                out.writeByte((byte) 2);
-                out.writeZLong(value.longValue());
-            }
-        } else {
+        if (value instanceof Float) {
+            out.writeByte((byte) 0);
             out.writeFloat(value.floatValue());
+        } else if (value instanceof Double) {
+            out.writeByte((byte) 1);
+            out.writeDouble(value.doubleValue());
+        } else {
+            out.writeByte((byte) 2);
+            out.writeZLong(value.longValue());
         }
     }
diff --git a/server/src/main/java/org/opensearch/env/NodeMetadata.java b/server/src/main/java/org/opensearch/env/NodeMetadata.java
index 3944ecfd72d4c..03e92424c4517 100644
--- a/server/src/main/java/org/opensearch/env/NodeMetadata.java
+++ b/server/src/main/java/org/opensearch/env/NodeMetadata.java
@@ -32,7 +32,6 @@
 package org.opensearch.env;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.xcontent.ObjectParser;
@@ -93,7 +92,7 @@ public Version nodeVersion() {
     public NodeMetadata upgradeToCurrentVersion() {
         if (nodeVersion.equals(Version.V_EMPTY)) {
-            assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards";
+            assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards";
             return new NodeMetadata(nodeId, Version.CURRENT);
         }
@@ -127,8 +126,7 @@ public void setNodeVersionId(int nodeVersionId) {
         public NodeMetadata build() {
             final Version nodeVersion;
             if (this.nodeVersion == null) {
-                assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1
-                    : "version is required in the node metadata from v9 onwards";
+                assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards";
                 nodeVersion = Version.V_EMPTY;
             } else {
                 nodeVersion = this.nodeVersion;
diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
index f70fdea153893..48dd0ddf90413 100644
--- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
+++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
@@ -37,7 +37,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.SetOnce;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.Version;
@@ -136,8 +135,8 @@ public void start(
                 long currentTerm = onDiskState.currentTerm;
                 if (onDiskState.empty()) {
-                    assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1
-                        : "legacy metadata loader is not needed anymore from v9 onwards";
+                    assert Version.CURRENT.major <= Version.V_3_0_0.major + 1
+                        : "legacy metadata loader is not needed anymore from v4 onwards";
                     final Tuple legacyState = metaStateService.loadFullState();
                     if (legacyState.v1().isEmpty() == false) {
                         metadata = legacyState.v2();
diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java
index 8ec2b70001fc9..7a78d97edf360 100644
--- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java
+++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java
@@ -35,7 +35,6 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
@@ -213,12 +212,10 @@ public Analyzer getAnalyzer(String analyzer) throws IOException {
                 }
             });
         } else if ("standard_html_strip".equals(analyzer)) {
-            if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_0_0)) {
-                throw new IllegalArgumentException(
-                    "[standard_html_strip] analyzer is not supported for new indices, "
-                        + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter"
-                );
-            }
+            throw new IllegalArgumentException(
+                "[standard_html_strip] analyzer is not supported for new indices, "
+                    + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter"
+            );
         }
         return analyzerProvider.get(environment, analyzer).get();
diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java
index 701a9302fc164..e66ae20508dfe 100644
--- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java
+++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java
@@ -35,8 +35,6 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute;
 import org.apache.lucene.analysis.shingle.ShingleFilter;
-import org.opensearch.LegacyESVersion;
-import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.env.Environment;
 import org.opensearch.index.IndexSettings;
@@ -48,8 +46,6 @@
  */
 public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
-    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ShingleTokenFilterFactory.class);
-
     private final Factory factory;
     public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
@@ -61,27 +57,17 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro
         int shingleDiff = maxShingleSize - minShingleSize + (outputUnigrams ? 1 : 0);
         if (shingleDiff > maxAllowedShingleDiff) {
-            if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
-                throw new IllegalArgumentException(
-                    "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)"
-                        + " must be less than or equal to: ["
-                        + maxAllowedShingleDiff
-                        + "] but was ["
-                        + shingleDiff
-                        + "]. This limit"
-                        + " can be set by changing the ["
-                        + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey()
-                        + "] index level setting."
-                );
-            } else {
-                deprecationLogger.deprecate(
-                    "excessive_shingle_diff",
-                    "Deprecated big difference between maxShingleSize and minShingleSize"
-                        + " in Shingle TokenFilter, expected difference must be less than or equal to: ["
-                        + maxAllowedShingleDiff
-                        + "]"
-                );
-            }
+            throw new IllegalArgumentException(
+                "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)"
+                    + " must be less than or equal to: ["
+                    + maxAllowedShingleDiff
+                    + "] but was ["
+                    + shingleDiff
+                    + "]. This limit"
+                    + " can be set by changing the ["
+                    + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey()
+                    + "] index level setting."
+            );
         }
         Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false);
@@ -105,16 +91,7 @@ public TokenStream create(TokenStream tokenStream) {
     @Override
     public TokenFilterFactory getSynonymFilter() {
-        if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms");
-        } else {
-            DEPRECATION_LOGGER.deprecate(
-                name() + "_synonym_tokenfilters",
-                "Token filter " + name() + "] will not be usable to parse synonyms after v7.0"
-            );
-        }
-        return this;
-
+        throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms");
     }
     public Factory getInnerFactory() {
diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java
index 8e01c1f41f078..4b19fe4c5de79 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java
@@ -45,7 +45,6 @@
 import org.opensearch.Version;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.geo.ShapeRelation;
-import org.opensearch.common.joda.Joda;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.lucene.BytesRefs;
 import org.opensearch.common.time.DateFormatter;
@@ -260,11 +259,7 @@ public Builder(
         private DateFormatter buildFormatter() {
             try {
-                if (Joda.isJodaPattern(indexCreatedVersion, format.getValue())) {
-                    return Joda.forPattern(format.getValue()).withLocale(locale.getValue());
-                } else {
-                    return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue());
-                }
+                return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue());
             } catch (IllegalArgumentException e) {
                 throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e);
             }
diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java
index 1e5b3b4a9c93e..23d58fa18b7e3 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java
@@ -40,7 +40,6 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.OpenSearchGenerationException;
-import org.opensearch.Version;
 import org.opensearch.common.bytes.BytesArray;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.Settings;
@@ -90,12 +89,8 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) {
             this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1));
             this.rootObjectMapper = builder.build(builderContext);
-            final String type = rootObjectMapper.name();
             final DocumentMapper existingMapper = mapperService.documentMapper();
-            final Version indexCreatedVersion = mapperService.getIndexSettings().getIndexVersionCreated();
-            final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers(
-                indexCreatedVersion
-            );
+            final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers();
             for (Map.Entry entry : metadataMapperParsers.entrySet()) {
                 final String name = entry.getKey();
                 final MetadataFieldMapper existingMetadataMapper = existingMapper == null
diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java
index 9fa088396a38b..237d69e3ad244 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java
@@ -91,7 +91,7 @@ public DocumentMapperParser(
         this.scriptService = scriptService;
         this.typeParsers = mapperRegistry.getMapperParsers();
         this.indexVersionCreated = indexSettings.getIndexVersionCreated();
-        this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(indexVersionCreated);
+        this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers();
     }
     public Mapper.TypeParser.ParserContext parserContext() {
diff --git a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java
index 71d76c6a835c2..a2224e7214f4b 100644
--- a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java
@@ -41,7 +41,6 @@
 import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.Version;
 import org.opensearch.common.Explicit;
@@ -577,11 +576,7 @@ public void doXContentBody(XContentBuilder builder, boolean includeDefaults, Par
         } else if (includeDefaults && fieldType().treeLevels() == 0) { // defaults only make sense if tree levels are not specified
             builder.field(DeprecatedParameters.Names.PRECISION.getPreferredName(), DistanceUnit.METERS.toString(50));
         }
-
-        if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) {
-            builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName());
-        }
-
+        builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName());
         if (includeDefaults || fieldType().distanceErrorPct() != fieldType().defaultDistanceErrorPct) {
             builder.field(DeprecatedParameters.Names.DISTANCE_ERROR_PCT.getPreferredName(), fieldType().distanceErrorPct());
         }
diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java
index f0d0b77396b0e..af37ddc41b567 100644
--- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java
+++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java
@@ -36,7 +36,6 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
 import org.opensearch.Assertions;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
@@ -228,8 +227,7 @@ public MapperService(
         this.mapperRegistry = mapperRegistry;
         this.idFieldDataEnabled = idFieldDataEnabled;
-        if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())
-            && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
+        if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) {
             throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0");
         }
     }
@@ -674,7 +672,7 @@ public void close() throws IOException {
      * this method considers all mapper plugins
      */
     public boolean isMetadataField(String field) {
-        return mapperRegistry.isMetadataField(indexVersionCreated, field);
+        return mapperRegistry.isMetadataField(field);
     }
     /**
diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java
index 5257609e0cba9..faf9fd5182690 100644
--- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java
@@ -40,7 +40,6 @@
 import org.opensearch.common.Explicit;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.geo.ShapeRelation;
-import org.opensearch.common.joda.Joda;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.network.InetAddresses;
 import org.opensearch.common.settings.Setting;
@@ -197,12 +196,7 @@ protected RangeFieldType setupFieldType(BuilderContext context) {
             // The builder context may not have index created version, falling back to indexCreatedVersion
             // property of this mapper builder.
-            DateFormatter dateTimeFormatter;
-            if (Joda.isJodaPattern(context.indexCreatedVersionOrDefault(indexCreatedVersion), format.getValue())) {
-                dateTimeFormatter = Joda.forPattern(format.getValue()).withLocale(locale.getValue());
-            } else {
-                dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue());
-            }
+            DateFormatter dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue());
             return new RangeFieldType(
                 buildFullName(context),
                 index.getValue(),
diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java
index 4e7b6fb51291b..fe3bcd81e72be 100644
--- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java
+++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java
@@ -34,7 +34,6 @@
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.Query;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.ParsingException;
@@ -254,9 +253,6 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException {
         maxExpansions = in.readVInt();
         minimumShouldMatch = in.readOptionalString();
         fuzzyRewrite = in.readOptionalString();
-        if (in.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            in.readOptionalBoolean(); // unused use_dis_max flag
-        }
         tieBreaker = in.readOptionalFloat();
         lenient = in.readOptionalBoolean();
         cutoffFrequency = in.readOptionalFloat();
@@ -282,9 +278,6 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeVInt(maxExpansions);
         out.writeOptionalString(minimumShouldMatch);
         out.writeOptionalString(fuzzyRewrite);
-        if (out.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            out.writeOptionalBoolean(null);
-        }
         out.writeOptionalFloat(tieBreaker);
         out.writeOptionalBoolean(lenient);
         out.writeOptionalFloat(cutoffFrequency);
diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java
index 1e3ec368df411..139b8fffbac3a 100644
--- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java
+++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java
@@ -63,7 +63,6 @@
 import org.apache.lucene.search.similarities.NormalizationH2;
 import org.apache.lucene.search.similarities.NormalizationH3;
 import org.apache.lucene.search.similarities.NormalizationZ;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.Settings;
@@ -157,22 +156,9 @@ private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings
         if (model == null) {
             String replacement = LEGACY_BASIC_MODELS.get(basicModel);
             if (replacement != null) {
-                if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) {
-                    throw new IllegalArgumentException(
-                        "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model."
-                    );
-                } else {
-                    deprecationLogger.deprecate(
-                        basicModel + "_similarity_model_replaced",
-                        "Basic model ["
-                            + basicModel
-                            + "] isn't supported anymore and has arbitrarily been replaced with ["
-                            + replacement
-                            + "]."
-                    );
-                    model = BASIC_MODELS.get(replacement);
-                    assert model != null;
-                }
+                throw new IllegalArgumentException(
+                    "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model."
+                );
             }
         }
@@ -195,22 +181,9 @@ private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Setting
         if (effect == null) {
             String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect);
             if (replacement != null) {
-                if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) {
-                    throw new IllegalArgumentException(
-                        "After effect [" + afterEffect + "] isn't supported anymore, please use another effect."
-                    );
-                } else {
-                    deprecationLogger.deprecate(
-                        afterEffect + "_after_effect_replaced",
-                        "After effect ["
-                            + afterEffect
-                            + "] isn't supported anymore and has arbitrarily been replaced with ["
-                            + replacement
-                            + "]."
-                    );
-                    effect = AFTER_EFFECTS.get(replacement);
-                    assert effect != null;
-                }
+                throw new IllegalArgumentException(
+                    "After effect [" + afterEffect + "] isn't supported anymore, please use another effect."
+                );
             }
         }
@@ -294,14 +267,7 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett
         unknownSettings.removeAll(Arrays.asList(supportedSettings));
         unknownSettings.remove("type"); // used to figure out which sim this is
         if (unknownSettings.isEmpty() == false) {
-            if (version.onOrAfter(LegacyESVersion.V_7_0_0)) {
-                throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings);
-            } else {
-                deprecationLogger.deprecate(
-                    "unknown_similarity_setting",
-                    "Unknown settings for similarity of type [" + type + "]: " + unknownSettings
-                );
-            }
+            throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings);
         }
     }
diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java
index d575ec508acb6..c3fc7ffbb0fe5 100644
--- a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java
+++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java
@@ -39,12 +39,10 @@
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.BooleanSimilarity;
-import org.apache.lucene.search.similarities.ClassicSimilarity;
 import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.BytesRef;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.TriFunction;
 import org.opensearch.common.logging.DeprecationLogger;
@@ -76,25 +74,12 @@ public final class SimilarityService extends AbstractIndexComponent {
     static {
         Map>> defaults = new HashMap<>();
         defaults.put(CLASSIC_SIMILARITY, version -> {
-            if (version.onOrAfter(LegacyESVersion.V_7_0_0)) {
-                return () -> {
-                    throw new IllegalArgumentException(
-                        "The [classic] similarity may not be used anymore. Please use the [BM25] "
-                            + "similarity or build a custom [scripted] similarity instead."
-                    );
-                };
-            } else {
-                final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version);
-                return () -> {
-                    deprecationLogger.deprecate(
-                        "classic_similarity",
-                        "The [classic] similarity is now deprecated in favour of BM25, which is generally "
-                            + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity "
-                            + "instead."
-                    );
-                    return similarity;
-                };
-            }
+            return () -> {
+                throw new IllegalArgumentException(
+                    "The [classic] similarity may not be used anymore. Please use the [BM25] "
+                        + "similarity or build a custom [scripted] similarity instead."
+                );
+            };
         });
         defaults.put("BM25", version -> {
             final LegacyBM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version);
@@ -107,20 +92,10 @@ public final class SimilarityService extends AbstractIndexComponent {
         Map> builtIn = new HashMap<>();
         builtIn.put(CLASSIC_SIMILARITY, (settings, version, script) -> {
-            if (version.onOrAfter(LegacyESVersion.V_7_0_0)) {
-                throw new IllegalArgumentException(
-                    "The [classic] similarity may not be used anymore. Please use the [BM25] "
-                        + "similarity or build a custom [scripted] similarity instead."
-                );
-            } else {
-                deprecationLogger.deprecate(
-                    "classic_similarity",
-                    "The [classic] similarity is now deprecated in favour of BM25, which is generally "
-                        + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity "
-                        + "instead."
-                );
-                return SimilarityProviders.createClassicSimilarity(settings, version);
-            }
+            throw new IllegalArgumentException(
+                "The [classic] similarity may not be used anymore. Please use the [BM25] "
+                    + "similarity or build a custom [scripted] similarity instead."
+            );
         });
         builtIn.put("BM25", (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version));
         builtIn.put("boolean", (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version));
@@ -258,10 +233,7 @@ private static void validateScoresArePositive(Version indexCreatedVersion, Simil
         for (int freq = 1; freq <= 10; ++freq) {
             float score = scorer.score(freq, norm);
             if (score < 0) {
-                fail(
-                    indexCreatedVersion,
-                    "Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm)
-                );
+                fail("Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm));
                 break;
             }
         }
@@ -288,7 +260,6 @@ private static void validateScoresDoNotDecreaseWithFreq(Version indexCreatedVers
             float score = scorer.score(freq, norm);
             if (score < previousScore) {
                 fail(
-                    indexCreatedVersion,
                     "Similarity scores should not decrease when term frequency increases:\n"
                         + scorer.explain(Explanation.match(freq - 1, "term freq"), norm)
                         + "\n"
@@ -327,7 +298,6 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers
             float score = scorer.score(1, norm);
             if (score > previousScore) {
                 fail(
-                    indexCreatedVersion,
                     "Similarity scores should not increase when norm increases:\n"
                         + scorer.explain(Explanation.match(1, "term freq"), norm - 1)
                         + "\n"
@@ -340,12 +310,8 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers
         }
     }
-    private static void fail(Version indexCreatedVersion, String message) {
-        if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) {
-            throw new IllegalArgumentException(message);
-        } else {
-            deprecationLogger.deprecate("similarity_failure", message);
-        }
+    private static void fail(String message) {
+        throw new IllegalArgumentException(message);
     }
 }
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index 6808803ee0988..f2961c0f3b13d 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -40,10 +40,8 @@
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.RamUsageEstimator;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.ResourceAlreadyExistsException;
-import org.opensearch.Version;
 import org.opensearch.action.admin.indices.stats.CommonStats;
 import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
 import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag;
@@ -699,8 +697,7 @@ private synchronized IndexService createIndexService(
         IndexingOperationListener... indexingOperationListeners
     ) throws IOException {
         final IndexSettings idxSettings = new IndexSettings(indexMetadata, settings, indexScopedSettings);
-        if (idxSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)
-            && EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) {
+        if (EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) {
             throw new IllegalArgumentException(
                 "Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0"
             );
@@ -1710,8 +1707,8 @@ public Function> getFieldFilter() {
     /**
     * Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise.
     */
-    public boolean isMetadataField(Version indexCreatedVersion, String field) {
-        return mapperRegistry.isMetadataField(indexCreatedVersion, field);
+    public boolean isMetadataField(String field) {
+        return mapperRegistry.isMetadataField(field);
     }
     /**
diff --git a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java
index cc87c982a684d..22be07dd90f94 100644
--- a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java
+++ b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java
@@ -33,7 +33,6 @@
 package org.opensearch.indices.analysis;
 import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.TokenStream;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
@@ -42,7 +41,6 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.env.Environment;
 import org.opensearch.index.IndexSettings;
-import org.opensearch.index.analysis.AbstractTokenFilterFactory;
 import org.opensearch.index.analysis.AnalysisRegistry;
 import org.opensearch.index.analysis.AnalyzerProvider;
 import org.opensearch.index.analysis.CharFilterFactory;
@@ -152,20 +150,7 @@ private NamedRegistry> setupTokenFilters(
         tokenFilters.register("standard", new AnalysisProvider() {
             @Override
             public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-                if (indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_0_0)) {
-                    deprecationLogger.deprecate(
-                        "standard_deprecation",
-                        "The [standard] token filter name is deprecated and will be removed in a future version."
-                    );
-                } else {
-                    throw new IllegalArgumentException("The [standard] token filter has been removed.");
-                }
-                return new AbstractTokenFilterFactory(indexSettings, name, settings) {
-                    @Override
-                    public TokenStream create(TokenStream tokenStream) {
-                        return tokenStream;
-                    }
-                };
+                throw new IllegalArgumentException("The [standard] token filter has been removed.");
             }
             @Override
diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java
index 3a1d7b1ebb1e3..c26428309aec5 100644
--- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java
+++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java
@@ -32,10 +32,8 @@
 package org.opensearch.indices.mapper;
-import org.opensearch.Version;
 import org.opensearch.index.mapper.Mapper;
 import org.opensearch.index.mapper.MetadataFieldMapper;
-import org.opensearch.index.mapper.NestedPathFieldMapper;
 import org.opensearch.plugins.MapperPlugin;
 import java.util.Collections;
@@ -53,7 +51,6 @@ public final class MapperRegistry {
     private final Map mapperParsers;
     private final Map metadataMapperParsers;
-    private final Map metadataMapperParsersPre20;
     private final Function> fieldFilter;
     public MapperRegistry(
@@ -63,9 +60,6 @@ public MapperRegistry(
     ) {
         this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers));
         this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers));
-        Map tempPre20 = new LinkedHashMap<>(metadataMapperParsers);
-        tempPre20.remove(NestedPathFieldMapper.NAME);
-        this.metadataMapperParsersPre20 = Collections.unmodifiableMap(tempPre20);
         this.fieldFilter = fieldFilter;
     }
@@ -81,15 +75,15 @@ public Map getMapperParsers() {
      * Return a map of the meta mappers that have been registered. The
      * returned map uses the name of the field as a key.
      */
-    public Map getMetadataMapperParsers(Version indexCreatedVersion) {
-        return indexCreatedVersion.onOrAfter(Version.V_2_0_0) ? metadataMapperParsers : metadataMapperParsersPre20;
+    public Map getMetadataMapperParsers() {
+        return metadataMapperParsers;
     }
     /**
     * Returns true if the provided field is a registered metadata field, false otherwise
     */
-    public boolean isMetadataField(Version indexCreatedVersion, String field) {
-        return getMetadataMapperParsers(indexCreatedVersion).containsKey(field);
+    public boolean isMetadataField(String field) {
+        return getMetadataMapperParsers().containsKey(field);
     }
     /**
diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
index 819d67cb8621a..426551ab50f18 100644
--- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
+++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
@@ -33,7 +33,6 @@
 package org.opensearch.monitor.jvm;
 import org.apache.lucene.util.Constants;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.Booleans;
 import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.io.PathUtils;
@@ -305,13 +304,8 @@ public JvmInfo(StreamInput in) throws IOException {
         vmName = in.readString();
         vmVersion = in.readString();
         vmVendor = in.readString();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            bundledJdk = in.readBoolean();
-            usingBundledJdk = in.readOptionalBoolean();
-        } else {
-            bundledJdk = false;
-            usingBundledJdk = null;
-        }
+        bundledJdk = in.readBoolean();
+        usingBundledJdk = in.readOptionalBoolean();
         startTime = in.readLong();
         inputArguments = new String[in.readInt()];
         for (int i = 0; i < inputArguments.length; i++) {
@@ -341,10 +335,8 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(vmName);
         out.writeString(vmVersion);
         out.writeString(vmVendor);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(bundledJdk);
-            out.writeOptionalBoolean(usingBundledJdk);
-        }
+        out.writeBoolean(bundledJdk);
+        out.writeOptionalBoolean(usingBundledJdk);
         out.writeLong(startTime);
         out.writeInt(inputArguments.length);
         for (String inputArgument : inputArguments) {
diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java
index 34d868f1d6046..9c8f1157cb718 100644
--- a/server/src/main/java/org/opensearch/script/ScriptStats.java
+++ b/server/src/main/java/org/opensearch/script/ScriptStats.java
@@ -89,7 +89,7 @@ public ScriptStats(ScriptContextStats context) {
     public ScriptStats(StreamInput in) throws IOException {
         compilations = in.readVLong();
         cacheEvictions = in.readVLong();
-        compilationLimitTriggered = in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readVLong() : 0;
+        compilationLimitTriggered = in.readVLong();
         contextStats = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readList(ScriptContextStats::new) : Collections.emptyList();
     }
@@ -97,9 +97,7 @@ public ScriptStats(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(compilations);
         out.writeVLong(cacheEvictions);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeVLong(compilationLimitTriggered);
-        }
+        out.writeVLong(compilationLimitTriggered);
         if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) {
             out.writeList(contextStats);
         }
diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java
index 84c46e400543a..4b592303ee253 100644
--- a/server/src/main/java/org/opensearch/search/DocValueFormat.java
+++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java
@@ -34,7 +34,6 @@
 import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.util.BytesRef;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.NamedWriteable;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
@@ -44,7 +43,6 @@
 import org.opensearch.common.network.NetworkAddress;
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.common.time.DateMathParser;
-import org.opensearch.common.time.DateUtils;
 import org.opensearch.geometry.utils.Geohash;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.search.aggregations.bucket.GeoTileUtils;
@@ -224,34 +222,12 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu
         public DateTime(StreamInput in) throws IOException {
             String datePattern = in.readString();
-
             String zoneId = in.readString();
-            if (in.getVersion().before(LegacyESVersion.V_7_0_0)) {
-                this.timeZone = DateUtils.of(zoneId);
-                this.resolution = DateFieldMapper.Resolution.MILLISECONDS;
-            } else {
-                this.timeZone = ZoneId.of(zoneId);
-                this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt());
-            }
-            final boolean isJoda;
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) {
-                // if stream is from 7.7 Node it will have a flag indicating if format is joda
-                isJoda = in.readBoolean();
-            } else {
-                /*
-                 When received a stream from 6.0-6.latest Node it can be java if starts with 8 otherwise joda.
-
-                 If a stream is from [7.0 - 7.7) the boolean indicating that this is joda is not present.
-                 This means that if an index was created in 6.x using joda pattern and then cluster was upgraded to
-                 7.x but earlier then 7.0, there is no information that can tell that the index is using joda style pattern.
-                 It will be assumed that clusters upgrading from [7.0 - 7.7) are using java style patterns.
-                 */
-                isJoda = Joda.isJodaPattern(in.getVersion(), datePattern);
-            }
+            this.timeZone = ZoneId.of(zoneId);
+            this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt());
+            final boolean isJoda = in.readBoolean();
             this.formatter = isJoda ? Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern);
-
             this.parser = formatter.toDateMathParser();
-
         }
         @Override
@@ -262,16 +238,10 @@ public String getWriteableName() {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(formatter.pattern());
-            if (out.getVersion().before(LegacyESVersion.V_7_0_0)) {
-                out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID());
-            } else {
-                out.writeString(timeZone.getId());
-                out.writeVInt(resolution.ordinal());
-            }
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) {
-                // in order not to loose information if the formatter is a joda we send a flag
-                out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method..
-            }
+            out.writeString(timeZone.getId());
+            out.writeVInt(resolution.ordinal());
+            // in order not to lose information if the formatter is a joda one we send a flag
+            out.writeBoolean(formatter instanceof JodaDateFormatter); // TODO: consider refactoring to an isJoda method
         }
         public DateMathParser getDateMathParser() {
diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java
index 40c7a5791454c..7c30656505663 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java
@@ -32,7 +32,6 @@
 package org.opensearch.search.aggregations.pipeline;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.common.xcontent.XContentBuilder;
@@ -99,11 +98,7 @@ public InternalPercentilesBucket(StreamInput in) throws IOException {
         format = in.readNamedWriteable(DocValueFormat.class);
         percentiles = in.readDoubleArray();
         percents = in.readDoubleArray();
-
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            keyed = in.readBoolean();
-        }
-
+        keyed = in.readBoolean();
         computeLookup();
     }
@@ -112,10 +107,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeNamedWriteable(format);
         out.writeDoubleArray(percentiles);
         out.writeDoubleArray(percents);
-
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(keyed);
-        }
+        out.writeBoolean(keyed);
     }
     @Override
diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java
index bef97bbbaa83a..8e68e62b04766 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java
@@ -34,7 +34,6 @@
 import com.carrotsearch.hppc.DoubleArrayList;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
@@ -70,19 +69,13 @@ public PercentilesBucketPipelineAggregationBuilder(String name, String bucketsPa
     public PercentilesBucketPipelineAggregationBuilder(StreamInput in) throws IOException {
         super(in, NAME);
         percents = in.readDoubleArray();
-
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            keyed = in.readBoolean();
-        }
+        keyed = in.readBoolean();
     }
     @Override
     protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeDoubleArray(percents);
-
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(keyed);
-        }
+        out.writeBoolean(keyed);
     }
     /**
diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java
index bd838fe23da8b..7fad7e233c424 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java
@@ -32,7 +32,6 @@
 package org.opensearch.search.aggregations.pipeline;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.search.DocValueFormat;
@@ -76,19 +75,13 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg
     public PercentilesBucketPipelineAggregator(StreamInput in) throws IOException {
         super(in);
         percents = in.readDoubleArray();
-
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            keyed = in.readBoolean();
-        }
+        keyed = in.readBoolean();
     }
     @Override
     public void innerWriteTo(StreamOutput out) throws IOException {
         out.writeDoubleArray(percents);
-
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(keyed);
-        }
+        out.writeBoolean(keyed);
     }
     @Override
diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java
index b7e157b6050fc..ae76fd0a3aa3f 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java
@@ -15,7 +15,6 @@
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.common.io.stream.Writeable;
-import org.opensearch.common.time.DateUtils;
 import org.opensearch.common.xcontent.ObjectParser;
 import org.opensearch.common.xcontent.ToXContentObject;
 import org.opensearch.common.xcontent.XContentBuilder;
@@ -86,11 +85,7 @@ public BaseMultiValuesSourceFieldConfig(StreamInput in) throws IOException {
         }
         this.missing = in.readGenericValue();
         this.script = in.readOptionalWriteable(Script::new);
-        if (in.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone());
-        } else {
-            this.timeZone = in.readOptionalZoneId();
-        }
+        this.timeZone = in.readOptionalZoneId();
     }
     @Override
@@ -102,11 +97,7 @@ public void writeTo(StreamOutput out) throws IOException {
         }
         out.writeGenericValue(missing);
         out.writeOptionalWriteable(script);
-        if (out.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone));
-        } else {
-            out.writeOptionalZoneId(timeZone);
-        }
+        out.writeOptionalZoneId(timeZone);
         doWriteTo(out);
     }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java
index 52afc6435d562..b492d9cadb975 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java
@@ -31,12 +31,10 @@
 package org.opensearch.search.aggregations.support;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
-import org.opensearch.common.time.DateUtils;
 import org.opensearch.common.xcontent.AbstractObjectParser;
 import org.opensearch.common.xcontent.ObjectParser;
 import org.opensearch.common.xcontent.XContentBuilder;
@@ -233,11 +231,7 @@ private void read(StreamInput in) throws IOException {
         }
         format = in.readOptionalString();
         missing = in.readGenericValue();
-        if (in.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone());
-        } else {
-            timeZone = in.readOptionalZoneId();
-        }
+        timeZone = in.readOptionalZoneId();
     }
     @Override
@@ -259,11 +253,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException {
         }
         out.writeOptionalString(format);
         out.writeGenericValue(missing);
-        if (out.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone));
-        } else {
-            out.writeOptionalZoneId(timeZone);
-        }
+        out.writeOptionalZoneId(timeZone);
         innerWriteTo(out);
     }
diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java
index d3b131cf9f792..565932f1bca13 100644
--- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java
+++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java
@@ -260,11 +260,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException {
         searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new);
         sliceBuilder = in.readOptionalWriteable(SliceBuilder::new);
         collapse = in.readOptionalWriteable(CollapseBuilder::new);
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            trackTotalHitsUpTo = in.readOptionalInt();
-        } else {
-            trackTotalHitsUpTo = in.readBoolean() ? TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED;
-        }
+        trackTotalHitsUpTo = in.readOptionalInt();
         if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
             if (in.readBoolean()) {
                 fetchFields = in.readList(FieldAndFormat::new);
@@ -326,11 +322,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalWriteable(searchAfterBuilder);
         out.writeOptionalWriteable(sliceBuilder);
         out.writeOptionalWriteable(collapse);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeOptionalInt(trackTotalHitsUpTo);
-        } else {
-            out.writeBoolean(trackTotalHitsUpTo == null ? true : trackTotalHitsUpTo > SearchContext.TRACK_TOTAL_HITS_DISABLED);
-        }
+        out.writeOptionalInt(trackTotalHitsUpTo);
         if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
             out.writeBoolean(fetchFields != null);
             if (fetchFields != null) {
diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java
index 90cc547f62a95..4a82b8eba653f 100644
--- a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java
+++ b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java
@@ -147,16 +147,10 @@ public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap= 0;
             out.writeVLong(statistics.maxDoc());
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-                // stats are always positive numbers
-                out.writeVLong(statistics.docCount());
-                out.writeVLong(statistics.sumTotalTermFreq());
-                out.writeVLong(statistics.sumDocFreq());
-            } else {
-                out.writeVLong(addOne(statistics.docCount()));
-                out.writeVLong(addOne(statistics.sumTotalTermFreq()));
-                out.writeVLong(addOne(statistics.sumDocFreq()));
-            }
+            // stats are always positive numbers
+            out.writeVLong(statistics.docCount());
+            out.writeVLong(statistics.sumTotalTermFreq());
+            out.writeVLong(statistics.sumDocFreq());
         }
     }
@@ -188,16 +182,10 @@ static ObjectObjectHashMap readFieldStats(StreamIn
             final long docCount;
             final long sumTotalTermFreq;
             final long sumDocFreq;
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-                // stats are always positive numbers
-                docCount = in.readVLong();
-                sumTotalTermFreq = in.readVLong();
-                sumDocFreq = in.readVLong();
-            } else {
-                docCount = subOne(in.readVLong());
-                sumTotalTermFreq = subOne(in.readVLong());
-                sumDocFreq = subOne(in.readVLong());
-            }
+            // stats are always positive numbers
+            docCount = in.readVLong();
+            sumTotalTermFreq = in.readVLong();
+            sumDocFreq = in.readVLong();
             CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq);
             fieldStatistics.put(field, stats);
         }
diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java
index 828c2f8c78d69..006a0627c337d 100644
--- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java
+++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java
@@ -258,11 +258,7 @@ public ShardSearchRequest(StreamInput in) throws IOException {
             outboundNetworkTime = in.readVLong();
         }
         clusterAlias = in.readOptionalString();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            allowPartialSearchResults = in.readBoolean();
-        } else {
-            allowPartialSearchResults = false;
-        }
+        allowPartialSearchResults = in.readBoolean();
         indexRoutings = in.readStringArray();
         preference = in.readOptionalString();
         if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) {
@@ -336,9 +332,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce
             out.writeVLong(outboundNetworkTime);
         }
         out.writeOptionalString(clusterAlias);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(allowPartialSearchResults);
-        }
+        out.writeBoolean(allowPartialSearchResults);
         if (asKey == false) {
             out.writeStringArray(indexRoutings);
             out.writeOptionalString(preference);
diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java
index 21e8a5646b9a5..1fd94eaddb2dd 100644
--- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java
+++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java
@@ -35,7 +35,6 @@
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.routing.GroupShardsIterator;
@@ -260,16 +259,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request,
         String field = this.field;
         boolean useTermQuery = false;
         if ("_uid".equals(field)) {
-            // on new indices, the _id acts as a _uid
-            field = IdFieldMapper.NAME;
-            if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
-                throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead");
-            }
-            DEPRECATION_LOG.deprecate(
-                "slice_on_uid",
-                "Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"
-            );
-            useTermQuery = true;
+            throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead");
         } else if (IdFieldMapper.NAME.equals(field)) {
             useTermQuery = true;
         } else if (type.hasDocValues() == false) {
diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java
index 90cc382ee4126..0aa881e2a3c9e 100644
--- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java
+++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java
@@ -33,7 +33,6 @@
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.SetOnce;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.CheckedFunction;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.ParsingException;
@@ -53,8 +52,6 @@
 import org.opensearch.search.suggest.Suggest.Suggestion.Entry;
 import org.opensearch.search.suggest.Suggest.Suggestion.Entry.Option;
 import org.opensearch.search.suggest.completion.CompletionSuggestion;
-import org.opensearch.search.suggest.phrase.PhraseSuggestion;
-import org.opensearch.search.suggest.term.TermSuggestion;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -101,36 +98,11 @@ public Suggest(List>> suggestions)
     }
     public Suggest(StreamInput in) throws IOException {
-        // in older versions, Suggestion types were serialized as Streamable
-        if (in.getVersion().before(LegacyESVersion.V_7_0_0)) {
-            final int size = in.readVInt();
-            suggestions = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                Suggestion> suggestion;
-                final int type = in.readVInt();
-                switch (type) {
-                    case TermSuggestion.TYPE:
-                        suggestion = new TermSuggestion(in);
-                        break;
-                    case CompletionSuggestion.TYPE:
-                        suggestion = new CompletionSuggestion(in);
-                        break;
-                    case PhraseSuggestion.TYPE:
-                        suggestion = new PhraseSuggestion(in);
-                        break;
-                    default:
-                        throw new IllegalArgumentException("Unknown suggestion type with ordinal " + type);
-                }
-                suggestions.add(suggestion);
-            }
-        } else {
-            int suggestionCount = in.readVInt();
-            suggestions = new ArrayList<>(suggestionCount);
-            for (int i = 0; i < suggestionCount; i++) {
-                suggestions.add(in.readNamedWriteable(Suggestion.class));
-            }
+        int suggestionCount = in.readVInt();
+        suggestions = new ArrayList<>(suggestionCount);
+        for (int i = 0; i < suggestionCount; i++) {
suggestions.add(in.readNamedWriteable(Suggestion.class)); } - hasScoreDocs = filter(CompletionSuggestion.class).stream().anyMatch(CompletionSuggestion::hasScoreDocs); } @@ -169,18 +141,9 @@ public boolean hasScoreDocs() { @Override public void writeTo(StreamOutput out) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeVInt(suggestions.size()); - for (Suggestion command : suggestions) { - out.writeVInt(command.getWriteableType()); - command.writeTo(out); - } - } else { - out.writeVInt(suggestions.size()); - for (Suggestion> suggestion : suggestions) { - out.writeNamedWriteable(suggestion); - } + out.writeVInt(suggestions.size()); + for (Suggestion> suggestion : suggestions) { + out.writeNamedWriteable(suggestion); } } @@ -284,13 +247,6 @@ public Suggestion(String name, int size) { public Suggestion(StreamInput in) throws IOException { name = in.readString(); size = in.readVInt(); - - // this is a hack to work around slightly different serialization order of earlier versions of TermSuggestion - if (in.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion t = (TermSuggestion) this; - t.setSort(SortBy.readFromStream(in)); - } - int entriesCount = in.readVInt(); entries.clear(); for (int i = 0; i < entriesCount; i++) { @@ -398,13 +354,6 @@ public void trim() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeVInt(size); - - // this is a hack to work around slightly different serialization order in older versions of TermSuggestion - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion termSuggestion = (TermSuggestion) this; - termSuggestion.getSort().writeTo(out); - } - out.writeVInt(entries.size()); for (Entry entry : entries) { entry.writeTo(out); diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index e3b809dc57b83..bf9598a3110ad 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -37,7 +37,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.geo.GeoPoint; @@ -312,37 +311,14 @@ public void validateReferences(Version indexVersionCreated, Function sortComparator() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - sort.writeTo(out); - } + sort.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 417498467622a..ca5078c4d1c56 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -391,16 +391,6 @@ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionLi @Override public ClusterState execute(ClusterState currentState) { RestoreInProgress restoreInProgress = 
currentState.custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY); - if (currentState.getNodes().getMinNodeVersion().before(LegacyESVersion.V_7_0_0)) { - // Check if another restore process is already running - cannot run two restore processes at the - // same time in versions prior to 7.0 - if (restoreInProgress.isEmpty() == false) { - throw new ConcurrentSnapshotExecutionException( - snapshot, - "Restore process is already running in this cluster" - ); - } - } // Check if the snapshot to restore is currently being deleted SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index ac88eae624813..92c0d482a848f 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -36,20 +36,14 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.Arrays; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyList; /** * This class encapsulates all remote cluster information to be rendered on @@ -79,26 +73,7 @@ public RemoteConnectionInfo(StreamInput input) throws IOException { clusterAlias = input.readString(); skipUnavailable = input.readBoolean(); } else { - List seedNodes; - if (input.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - seedNodes = Arrays.asList(input.readStringArray()); - } else { - // versions prior to 7.0.0 sent the resolved transport address of the seed nodes - final List transportAddresses = input.readList(TransportAddress::new); - seedNodes = transportAddresses.stream() - .map(a -> a.address().getHostString() + ":" + a.address().getPort()) - .collect(Collectors.toList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * We just throw any HTTP addresses received here on the floor - * because we don't need to do anything with them. 
- */ - input.readList(TransportAddress::new); - } - + List seedNodes = Arrays.asList(input.readStringArray()); int connectionsPerCluster = input.readVInt(); initialConnectionTimeout = input.readTimeValue(); int numNodesConnected = input.readVInt(); @@ -137,52 +112,12 @@ public void writeTo(StreamOutput out) throws IOException { } else { if (modeInfo.modeType() == RemoteConnectionStrategy.ConnectionStrategy.SNIFF) { SniffConnectionStrategy.SniffModeInfo sniffInfo = (SniffConnectionStrategy.SniffModeInfo) this.modeInfo; - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(sniffInfo.seedNodes.stream().map(s -> { - final String host = RemoteConnectionStrategy.parseHost(s); - final int port = RemoteConnectionStrategy.parsePort(s); - try { - return new TransportAddress(InetAddress.getByAddress(host, TransportAddress.META_ADDRESS.getAddress()), port); - } catch (final UnknownHostException e) { - throw new AssertionError(e); - } - }).collect(Collectors.toList())); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. - */ - out.writeList(emptyList()); - } + out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); out.writeVInt(sniffInfo.maxConnectionsPerCluster); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(sniffInfo.numNodesConnected); } else { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(new String[0]); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(emptyList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. 
- */ - out.writeList(emptyList()); - } + out.writeStringArray(new String[0]); out.writeVInt(0); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(0); diff --git a/server/src/test/java/org/opensearch/BuildTests.java b/server/src/test/java/org/opensearch/BuildTests.java index eeb6890699fdc..6e6a91419b762 100644 --- a/server/src/test/java/org/opensearch/BuildTests.java +++ b/server/src/test/java/org/opensearch/BuildTests.java @@ -299,26 +299,17 @@ public void testSerializationBWC() throws IOException { new Build(Build.Type.DOCKER, randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6), "other") ); - final List versions = Version.getDeclaredVersions(LegacyESVersion.class); - final Version post70Version = randomFrom( - versions.stream().filter(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)).collect(Collectors.toList()) - ); + final List versions = Version.getDeclaredVersions(Version.class); + final Version post10OpenSearchVersion = randomFrom( versions.stream().filter(v -> v.onOrAfter(Version.V_1_0_0)).collect(Collectors.toList()) ); - - final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); final WriteableBuild post10OpenSearch = copyWriteable( dockerBuild, writableRegistry(), WriteableBuild::new, post10OpenSearchVersion ); - - assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - - assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); - assertThat(post70.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); assertThat(post10OpenSearch.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); assertThat(post10OpenSearch.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index ff2bb77531486..0ca4cdd780f94 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -393,7 +393,7 @@ public void testSearchContextMissingException() throws IOException { public void testCircuitBreakingException() throws IOException { CircuitBreakingException ex = serialize( new CircuitBreakingException("Too large", 0, 100, CircuitBreaker.Durability.TRANSIENT), - LegacyESVersion.V_7_0_0 + Version.V_2_0_0 ); assertEquals("Too large", ex.getMessage()); assertEquals(100, ex.getByteLimit()); diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index 5b3213ded1c02..70bcf343e4c1e 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -415,7 +415,7 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.V_7_0_0)); + assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(7000099))); assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(6050099))); int currentMajorID = Version.computeID(Version.CURRENT.major, 0, 0, 99); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java 
b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java index 7127e0001592f..672e5ace8b5ae 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -64,14 +63,12 @@ public void testSerialization() throws Exception { Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT ); - // TODO: change version to V_6_6_0 after backporting: - if (testVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - if (randomBoolean()) { - clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); - } - if (randomBoolean()) { - clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); - } + + if (randomBoolean()) { + clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); + } + if (randomBoolean()) { + clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); } BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 19544af63944c..3ffa6d6910548 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; @@ -107,11 +106,7 @@ public void testRandomVersionSerialization() throws IOException { SearchRequest searchRequest = createSearchRequest(); Version version = VersionUtils.randomVersion(random()); SearchRequest deserializedRequest = copyWriteable(searchRequest, namedWriteableRegistry, SearchRequest::new, version); - if (version.before(LegacyESVersion.V_7_0_0)) { - assertTrue(deserializedRequest.isCcsMinimizeRoundtrips()); - } else { - assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); - } + assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis()); assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5aa582a5e73f6..52922bc6a0e83 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -111,18 +111,16 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - final Version tooLow = 
LegacyESVersion.fromString("6.7.0"); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); - } - }); - } + final Version tooLow = LegacyESVersion.fromString("6.7.0"); + expectThrows(IllegalStateException.class, () -> { + if (randomBoolean()) { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + } else { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + } + }); - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0) && minNodeVersion.before(Version.V_3_0_0)) { + if (minNodeVersion.before(Version.V_3_0_0)) { Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java index fb01a493ff7c3..72b22e0efc09b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.close.CloseIndexResponse; @@ -44,9 +43,6 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; @@ -97,7 +93,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -181,91 +176,6 @@ public void testCloseRoutingTableWithSnapshottedIndex() { assertThat(updatedState.blocks().hasIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID), is(true)); } - public void testCloseRoutingTableRemovesRoutingTable() { - final Set nonBlockedIndices = new HashSet<>(); - final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); - final ClusterBlock closingBlock = MetadataIndexStateService.createIndexClosingBlock(); - - ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build(); - for (int i = 0; i < randomIntBetween(1, 25); i++) { - final String indexName = "index-" + i; - - if (randomBoolean()) { - state = addOpenedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state); - nonBlockedIndices.add(state.metadata().index(indexName).getIndex()); - } else { - state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - final Index index = state.metadata().index(indexName).getIndex(); - blockedIndices.put(index, closingBlock); - if (randomBoolean()) { - 
results.put(index, new CloseIndexResponse.IndexResult(index)); - } else { - results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); - } - } - } - - state = ClusterState.builder(state) - .nodes( - DiscoveryNodes.builder(state.nodes()) - .add( - new DiscoveryNode( - "old_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_0_0 - ) - ) - .add( - new DiscoveryNode( - "new_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_2_0 - ) - ) - ) - .build(); - - state = MetadataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); - assertThat(state.metadata().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); - - for (Index nonBlockedIndex : nonBlockedIndices) { - assertIsOpened(nonBlockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); - } - for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).hasFailures() == false) { - IndexMetadata indexMetadata = state.metadata().index(blockedIndex); - assertThat(indexMetadata.getState(), is(IndexMetadata.State.CLOSE)); - Settings indexSettings = indexMetadata.getSettings(); - assertThat(indexSettings.hasValue(MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false)); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), MetadataIndexStateService.INDEX_CLOSED_BLOCK), is(true)); - assertThat( - "Index must have only 1 block with [id=" + MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]", - state.blocks() - .indices() - .getOrDefault(blockedIndex.getName(), emptySet()) - .stream() - .filter(clusterBlock -> clusterBlock.id() == MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID) - .count(), - equalTo(1L) - ); - assertThat( - "Index routing table should have been removed when closing the index on mixed cluster version", - state.routingTable().index(blockedIndex), - nullValue() - ); - } else { - assertIsOpened(blockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), closingBlock), is(true)); - } - } - } - public void testAddIndexClosedBlocks() { final ClusterState initialState = ClusterState.builder(new ClusterName("testAddIndexClosedBlocks")).build(); { diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 6848cd2bbc773..a1b6cd763476a 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -32,7 +32,6 @@ package org.opensearch.common.settings; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -634,7 +633,7 @@ public void testMissingValue() throws Exception { public void testReadWriteArray() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.CURRENT, LegacyESVersion.V_7_0_0)); + output.setVersion(randomFrom(Version.CURRENT, Version.V_2_0_0)); Settings settings = Settings.builder().putList("foo.bar", "0", "1", "2", "3").put("foo.bar.baz", "baz").build(); Settings.writeSettingsToStream(settings, output); StreamInput in = 
StreamInput.wrap(BytesReference.toBytes(output.bytes())); diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 0f451fda7b9fb..22c10844028a9 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -94,7 +94,7 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) { if (randomBoolean()) { // drawing a truly random zoneId here can rarely fail under the following conditons: - // - index versionCreated before V_7_0_0 + // - index versionCreated before legacy V_7_0_0 // - no "forced" date parser through a format parameter // - one of the SystemV* time zones that Jodas DateTimeZone parser doesn't know about // thats why we exlude it here (see #58431) diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java index eb666f1206c26..1d7b749433c65 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.Similarity; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.test.OpenSearchTestCase; @@ -97,7 +97,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, negativeScoresSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -122,7 +122,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -147,7 +147,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index ec7ab06ac86a6..9c8ad3917c23f 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -32,7 +32,6 @@ package org.opensearch.indices; -import org.opensearch.Version; import org.opensearch.index.mapper.DocCountFieldMapper; import 
org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -51,7 +50,6 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.MapperPlugin; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -105,11 +103,9 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(version); + assertFalse(module.getMapperRegistry().getMetadataMapperParsers().isEmpty()); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); assertEquals(EXPECTED_METADATA_FIELDS.length, metadataMapperParsers.size()); int i = 0; for (String field : metadataMapperParsers.keySet()) { @@ -117,12 +113,7 @@ public void testBuiltinMappers() { } } { - Version version = VersionUtils.randomVersionBetween( - random(), - Version.V_1_0_0, - VersionUtils.getPreviousVersion(Version.V_2_0_0) - ); - assertEquals(EXPECTED_METADATA_FIELDS.length - 1, module.getMapperRegistry().getMetadataMapperParsers(version).size()); + assertEquals(EXPECTED_METADATA_FIELDS.length, module.getMapperRegistry().getMetadataMapperParsers().size()); } } @@ -132,11 +123,10 @@ public void testBuiltinWithPlugins() { MapperRegistry registry = module.getMapperRegistry(); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); assertThat( - registry.getMetadataMapperParsers(Version.CURRENT).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT).size()) + registry.getMetadataMapperParsers().size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size()) ); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(Version.CURRENT); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); Iterator iterator = metadataMapperParsers.keySet().iterator(); assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; @@ -213,15 +203,13 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git 
a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index e481384c3d6f3..c39af60650657 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -66,7 +66,6 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -79,7 +78,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.VersionUtils; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -565,16 +563,9 @@ public void testStatsByShardDoesNotDieFromExpectedExceptions() { public void testIsMetadataField() { IndicesService indicesService = getIndicesService(); - final Version randVersion = VersionUtils.randomIndexCompatibleVersion(random()); - assertFalse(indicesService.isMetadataField(randVersion, randomAlphaOfLengthBetween(10, 15))); + assertFalse(indicesService.isMetadataField(randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { - if (NestedPathFieldMapper.NAME.equals(builtIn) && randVersion.before(Version.V_2_0_0)) { - continue; // nested field mapper does not exist prior to 2.0 - } - assertTrue( - "Expected " + builtIn + " to be a metadata field for version " + randVersion, - indicesService.isMetadataField(randVersion, builtIn) - ); + assertTrue(indicesService.isMetadataField(builtIn)); } } diff --git a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java index 6b89eb92065b1..1cdc2f166224f 100644 --- a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java @@ -35,16 +35,13 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.OriginalIndicesTests; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; @@ -52,7 +49,6 @@ import org.opensearch.search.SearchModule; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.Aggregations; -import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.InternalAggregationsTests; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchContextId; @@ -60,9 +56,6 @@ import org.opensearch.search.suggest.SuggestTests; import 
org.opensearch.test.OpenSearchTestCase; -import java.io.IOException; -import java.util.Base64; - import static java.util.Collections.emptyList; public class QuerySearchResultTests extends OpenSearchTestCase { @@ -127,32 +120,6 @@ public void testSerialization() throws Exception { assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); } - public void testReadFromPre_7_1_0() throws IOException { - String message = "AAAAAAAAAGQAAAEAAAB/wAAAAAEBBnN0ZXJtcwVJblhNRgoDBVNhdWpvAAVrS3l3cwVHSVVZaAAFZXRUbEUFZGN0WVoABXhzYnVrAAEDAfoN" - + "A3JhdwUBAAJRAAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVkFhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hyd" - + "y0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2RMZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAA" - + "AAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZd3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXh" - + "DSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAAAEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NL" - + "U1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAApydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQ" - + "lFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksAClRJZHJlSkpVc1Y4AAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVk" - + "FhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hydy0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2R" - + "MZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAAAAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZ" - + "d3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXhDSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAA" - + "AEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NLU1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAA" - + "pydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQlFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksACm5rdExLUHp3cGgBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2t" - + "ldDH/A3JhdwEBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2tldDH/A3JhdwEAAAIAAf////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; - byte[] bytes = Base64.getDecoder().decode(message); - try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { - in.setVersion(LegacyESVersion.V_7_0_0); - QuerySearchResult querySearchResult = new QuerySearchResult(in); - assertEquals(100, querySearchResult.getContextId().getId()); - assertTrue(querySearchResult.hasAggs()); - InternalAggregations aggs = querySearchResult.consumeAggs().expand(); - assertEquals(1, aggs.asList().size()); - // We deserialize and throw away top level pipeline aggs - } - } - public void testNullResponse() throws Exception { QuerySearchResult querySearchResult = QuerySearchResult.nullInstance(); QuerySearchResult deserialized = copyWriteable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, Version.CURRENT); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index f71c67ce456bc..22f4fcc1fde3f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.HasAttributeNodeSelector; import 
org.opensearch.client.Node;
@@ -367,18 +366,6 @@ void checkWarningHeaders(final List<String> warningHeaders, final Version cluste
             final boolean matches = matcher.matches();
             if (matches) {
                 final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true);
-                if (clusterManagerVersion.before(LegacyESVersion.V_7_0_0)
-                    && message.equals(
-                        "the default number of shards will change from [5] to [1] in 7.0.0; "
-                            + "if you wish to continue using the default of [5] shards, "
-                            + "you must manage this on the create index request or with an index template"
-                    )) {
-                    /*
-                     * This warning header will come back in the vast majority of our tests that create an index when running against an
-                     * older cluster-manager. Rather than rewrite our tests to assert this warning header, we assume that it is expected.
-                     */
-                    continue;
-                }
                 if (message.startsWith("[types removal]")) {
                     // We skip warnings related to types deprecation because they are *everywhere*.
                     continue;
diff --git a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java
index d79e1730e16f6..9fb693efa9f8b 100644
--- a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java
@@ -32,7 +32,6 @@
 package org.opensearch.upgrades;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.Booleans;
 import org.opensearch.common.xcontent.support.XContentMapValues;
@@ -52,10 +51,10 @@ public static boolean isRunningAgainstOldCluster() {
 
     /**
      * @return true if test is running against an old cluster before that last major, in this case
-     * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link LegacyESVersion#V_7_0_0}
+     * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link Version#V_2_0_0}
      */
     protected final boolean isRunningAgainstAncientCluster() {
-        return isRunningAgainstOldCluster() && oldClusterVersion.before(LegacyESVersion.V_7_0_0);
+        return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_2_0_0);
     }
 
     public static Version getOldClusterVersion() {

From 588db38c38b3166d6d4ad31bc4fa516e4304ac42 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 7 Oct 2022 09:47:09 -0400
Subject: [PATCH 03/39] Fixing Gradle warnings associated with
 publishPluginZipPublicationToXxx tasks (#4696)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko
---
 CHANGELOG.md | 1 +
 .../org/opensearch/gradle/PublishPlugin.java | 5 +++--
 .../opensearch/gradle/pluginzip/Publish.java | 17 ++++++++++++-----
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e1b23eaa5ac20..2f7893881fdf1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -124,6 +124,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683))
 - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688))
 - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697))
+- Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696))
 
 ### Security
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java
index 2bdef8e4cd244..be12fdd99c1df 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java
@@ -92,7 +92,7 @@ public String call() throws Exception {
                 return String.format(
                     "%s/distributions/%s-%s.pom",
                     project.getBuildDir(),
-                    getArchivesBaseName(project),
+                    pomTask.getName().toLowerCase().contains("zip") ? project.getName() : getArchivesBaseName(project),
                     project.getVersion()
                 );
             }
@@ -130,7 +130,6 @@ public String call() throws Exception {
         publication.getPom().withXml(PublishPlugin::addScmInfo);
 
         if (!publication.getName().toLowerCase().contains("zip")) {
-
             // have to defer this until archivesBaseName is set
             project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project)));
@@ -139,6 +138,8 @@ public String call() throws Exception {
                 publication.artifact(project.getTasks().getByName("sourcesJar"));
                 publication.artifact(project.getTasks().getByName("javadocJar"));
             }
+        } else {
+            project.afterEvaluate(p -> publication.setArtifactId(project.getName()));
         }
 
         generatePomTask.configure(
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java
index 6dc7d660922b2..6b581fcaa7774 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java
@@ -13,12 +13,14 @@
 import org.gradle.api.publish.maven.MavenPublication;
 
 import java.nio.file.Path;
+import java.util.Set;
+import java.util.stream.Collectors;
+
 import org.gradle.api.Task;
 import org.gradle.api.publish.maven.plugins.MavenPublishPlugin;
 
 public class Publish implements Plugin<Project> {
-    // public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication";
     public final static String PUBLICATION_NAME = "pluginZip";
     public final static String STAGING_REPO = "zipStaging";
     public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo";
@@ -67,10 +69,15 @@ public void apply(Project project) {
             if (validatePluginZipPom != null) {
                 validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication");
             }
-            Task publishPluginZipPublicationToZipStagingRepository = project.getTasks()
-                .findByName("publishPluginZipPublicationToZipStagingRepository");
-            if (publishPluginZipPublicationToZipStagingRepository != null) {
-                publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication");
+
+            // There are a number of tasks prefixed by 'publishPluginZipPublication', e.g.:
+            // publishPluginZipPublicationToZipStagingRepository, publishPluginZipPublicationToMavenLocal
+            final Set<Task> publishPluginZipPublicationToTasks = project.getTasks()
+                .stream()
+                .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo"))
+                .collect(Collectors.toSet());
+            if (!publishPluginZipPublicationToTasks.isEmpty()) {
+                publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication"));
             }
         } else {
             project.getLogger()

From ef50f78d27e78c9b367d3c44f2f46450ca2749ff Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 7 Oct 2022 08:24:05 -0700
Subject: [PATCH 04/39] Bump gson from 2.9.0 to 2.9.1 in
 /test/fixtures/hdfs-fixture (#4066)

* Bump gson from 2.9.0 to 2.9.1 in /test/fixtures/hdfs-fixture

Bumps [gson](https://github.com/google/gson) from 2.9.0 to 2.9.1.
- [Release notes](https://github.com/google/gson/releases)
- [Changelog](https://github.com/google/gson/blob/master/CHANGELOG.md)
- [Commits](https://github.com/google/gson/compare/gson-parent-2.9.0...gson-parent-2.9.1)

---
updated-dependencies:
- dependency-name: com.google.code.gson:gson
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md | 4 +++-
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2f7893881fdf1..621e64717338a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 
 ## [Unreleased]
+### Dependencies
+- Bumps `gson` from 2.9.0 to 2.9.1
 
 ### Added
@@ -156,4 +158,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ### Security
 
 [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
-[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
\ No newline at end of file
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 40e3a1dc0587d..2e541572b7385 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -40,7 +40,7 @@ dependencies {
     api "commons-codec:commons-codec:${versions.commonscodec}"
     api "org.apache.logging.log4j:log4j-core:${versions.log4j}"
     api "io.netty:netty-all:${versions.netty}"
-    api 'com.google.code.gson:gson:2.9.0'
+    api 'com.google.code.gson:gson:2.9.1'
     api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}"
     api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}"
     api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"

From 109319e0724d5dfa46f5554a7ef819560eaa1fac Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Fri, 7 Oct 2022 11:25:39 -0500
Subject: [PATCH 05/39] Always auto release the flood stage block (#4703)

* Always auto release the flood stage block

Removes support for using a system property to disable the automatic
release of the write block applied when a node exceeds the flood-stage
watermark.
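
As a minimal illustration of the resulting behavior (a hypothetical
standalone sketch, not code from this change; the class name is invented,
but the property key and exception text mirror the updated static
initializer in DiskThresholdSettings below), a node started with the old
JVM option now fails fast:

    // Hypothetical example: simulates starting a node with
    //   -Dopensearch.disk.auto_release_flood_stage_block=false
    public class AutoReleasePropertyExample {
        public static void main(String[] args) {
            System.setProperty("opensearch.disk.auto_release_flood_stage_block", "false");
            final String property = System.getProperty("opensearch.disk.auto_release_flood_stage_block");
            if (property != null) {
                // Same check and message as the new DiskThresholdSettings static block:
                throw new IllegalArgumentException(
                    "system property [opensearch.disk.auto_release_flood_stage_block] "
                        + "has been removed in 3.0.0 and is not supported anymore"
                );
            }
        }
    }

Any value, including "false", is now rejected at startup, so the
read-only-allow-delete block is always released automatically.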
Signed-off-by: Nicholas Walter Knize

* update IAE message

Signed-off-by: Nicholas Walter Knize

Signed-off-by: Nicholas Walter Knize
---
 CHANGELOG.md | 1 +
 .../allocation/DiskThresholdMonitor.java | 37 ++-----------------
 .../allocation/DiskThresholdSettings.java | 18 +++------
 .../DiskThresholdSettingsTests.java | 1 -
 4 files changed, 11 insertions(+), 46 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 621e64717338a..e35d8c3eda0b3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -84,6 +84,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568))
 - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639))
 - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768))
+- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703))
 
 ### Fixed
 
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java
index a89271261ed14..0a6cfd8c04977 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java
@@ -37,7 +37,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.GroupedActionListener;
 import org.opensearch.client.Client;
@@ -54,7 +53,6 @@
 import org.opensearch.common.Priority;
 import org.opensearch.common.Strings;
 import org.opensearch.common.collect.ImmutableOpenMap;
-import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.set.Sets;
@@ -88,7 +86,6 @@ public class DiskThresholdMonitor {
     private final RerouteService rerouteService;
     private final AtomicLong lastRunTimeMillis = new AtomicLong(Long.MIN_VALUE);
     private final AtomicBoolean checkInProgress = new AtomicBoolean();
-    private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(logger.getName());
 
     /**
      * The IDs of the nodes that were over the low threshold in the last check (and maybe over another threshold too). Tracked so that we
@@ -121,14 +118,6 @@ public DiskThresholdMonitor(
         this.rerouteService = rerouteService;
         this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings);
         this.client = client;
-        if (diskThresholdSettings.isAutoReleaseIndexEnabled() == false) {
-            deprecationLogger.deprecate(
-                DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"),
-                "[{}] will be removed in version {}",
-                DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY,
-                LegacyESVersion.V_7_4_0.major + 1
-            );
-        }
     }
 
     private void checkFinished() {
@@ -371,23 +360,7 @@ public void onNewInfo(ClusterInfo info) {
                 .collect(Collectors.toSet());
 
             if (indicesToAutoRelease.isEmpty() == false) {
-                if (diskThresholdSettings.isAutoReleaseIndexEnabled()) {
-                    logger.info("releasing read-only-allow-delete block on indices: [{}]", indicesToAutoRelease);
-                    updateIndicesReadOnly(indicesToAutoRelease, listener, false);
-                } else {
-                    deprecationLogger.deprecate(
-                        DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"),
-                        "[{}] will be removed in version {}",
-                        DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY,
-                        LegacyESVersion.V_7_4_0.major + 1
-                    );
-                    logger.debug(
-                        "[{}] disabled, not releasing read-only-allow-delete block on indices: [{}]",
-                        DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY,
-                        indicesToAutoRelease
-                    );
-                    listener.onResponse(null);
-                }
+                updateIndicesReadOnly(indicesToAutoRelease, listener, false);
             } else {
                 logger.trace("no auto-release required");
                 listener.onResponse(null);
@@ -421,11 +394,9 @@ private void markNodesMissingUsageIneligibleForRelease(
     ) {
         for (RoutingNode routingNode : routingNodes) {
             if (usages.containsKey(routingNode.nodeId()) == false) {
-                if (routingNode != null) {
-                    for (ShardRouting routing : routingNode) {
-                        String indexName = routing.index().getName();
-                        indicesToMarkIneligibleForAutoRelease.add(indexName);
-                    }
+                for (ShardRouting routing : routingNode) {
+                    String indexName = routing.index().getName();
+                    indicesToMarkIneligibleForAutoRelease.add(indexName);
                 }
             }
         }
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java
index 0ce0b1bd7b688..56a1ccad112c5 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java
@@ -33,6 +33,7 @@
 package org.opensearch.cluster.routing.allocation;
 
 import org.opensearch.OpenSearchParseException;
+import org.opensearch.Version;
 import org.opensearch.common.Strings;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
@@ -108,18 +109,15 @@ public class DiskThresholdSettings {
     private volatile TimeValue rerouteInterval;
     private volatile Double freeDiskThresholdFloodStage;
     private volatile ByteSizeValue freeBytesThresholdFloodStage;
-    private static final boolean autoReleaseIndexEnabled;
-    public static final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block";
 
     static {
+        assert Version.CURRENT.major == Version.V_2_0_0.major + 1; // this check is unnecessary in v4
+        final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block";
+
         final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY);
-        if (property == null) {
-            autoReleaseIndexEnabled = true;
-        } else if (Boolean.FALSE.toString().equals(property)) {
-            autoReleaseIndexEnabled = false;
-        } else {
+        if (property != null) {
             throw new IllegalArgumentException(
-                AUTO_RELEASE_INDEX_ENABLED_KEY + " may only be unset or set to [false] but was [" + property + "]"
+                "system property [" + AUTO_RELEASE_INDEX_ENABLED_KEY + "] has been removed in 3.0.0 and is not supported anymore"
             );
         }
     }
@@ -371,10 +369,6 @@ public ByteSizeValue getFreeBytesThresholdFloodStage() {
         return freeBytesThresholdFloodStage;
     }
 
-    public boolean isAutoReleaseIndexEnabled() {
-        return autoReleaseIndexEnabled;
-    }
-
     public boolean includeRelocations() {
         return includeRelocations;
     }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java
index 363484777fe66..5184ca7fe887d 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java
@@ -60,7 +60,6 @@ public void testDefaults() {
         assertTrue(diskThresholdSettings.includeRelocations());
         assertEquals(zeroBytes, diskThresholdSettings.getFreeBytesThresholdFloodStage());
         assertEquals(5.0D, diskThresholdSettings.getFreeDiskThresholdFloodStage(), 0.0D);
-        assertTrue(diskThresholdSettings.isAutoReleaseIndexEnabled());
     }
 
     public void testUpdate() {

From 2e4b27b243d8bd2c515f66cf86c6d1d6a601307f Mon Sep 17 00:00:00 2001
From: Rishab Nahata
Date: Fri, 7 Oct 2022 22:26:38 +0530
Subject: [PATCH 06/39] Controlling discovery for decommissioned nodes (#4590)

* Controlling discovery for decommissioned nodes

Signed-off-by: Rishab Nahata
---
 CHANGELOG.md | 1 +
 .../cluster/coordination/Coordinator.java | 28 +++++++-
 .../cluster/coordination/JoinHelper.java | 16 ++++-
 .../coordination/JoinTaskExecutor.java | 32 ++++-----
 .../decommission/DecommissionService.java | 38 ++++++++--
 .../common/settings/ClusterSettings.java | 1 +
 .../org/opensearch/discovery/PeerFinder.java | 29 +++++++-
 .../coordination/CoordinatorTests.java | 43 +++++++++++
 .../cluster/coordination/JoinHelperTests.java | 9 ++-
 .../coordination/JoinTaskExecutorTests.java | 45 ++++++++++++
 .../cluster/coordination/NodeJoinTests.java | 71 +++++++++++++++++++
 .../opensearch/discovery/PeerFinderTests.java | 46 ++++++++++++
 12 files changed, 329 insertions(+), 30 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e35d8c3eda0b3..04b62c11a6865 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -76,6 +76,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261))
 - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660))
 - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661))
+- Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590))
 - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677))
 
 ### Deprecated
 
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
index 7ac716084793d..fbb345ea3a441 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
@@ -105,6 +105,7 @@
 import java.util.stream.StreamSupport;
 
 import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID;
+import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned;
 import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered;
 import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
 import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY;
@@ -138,6 +139,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery
 
     private final Settings settings;
     private final boolean singleNodeDiscovery;
+    private volatile boolean localNodeCommissioned;
     private final ElectionStrategy electionStrategy;
     private final TransportService transportService;
     private final ClusterManagerService clusterManagerService;
@@ -218,7 +220,8 @@ public Coordinator(
             this::joinLeaderInTerm,
             this.onJoinValidators,
             rerouteService,
-            nodeHealthService
+            nodeHealthService,
+            this::onNodeCommissionStatusChange
         );
         this.persistedStateSupplier = persistedStateSupplier;
         this.noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings);
@@ -281,6 +284,7 @@ public Coordinator(
             joinHelper::logLastFailedJoinAttempt
         );
         this.nodeHealthService = nodeHealthService;
+        this.localNodeCommissioned = true;
     }
 
     private ClusterFormationState getClusterFormationState() {
@@ -596,6 +600,9 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback
                     joinRequest.getSourceNode().getVersion(),
                     stateForJoinValidation.getNodes().getMinNodeVersion()
                 );
+                // we are checking source node commission status here to reject any join request coming from a decommissioned node
+                // even before executing the join task to fail fast
+                JoinTaskExecutor.ensureNodeCommissioned(joinRequest.getSourceNode(), stateForJoinValidation.metadata());
             }
             sendValidateJoinRequest(stateForJoinValidation, joinRequest, joinCallback);
         } else {
@@ -1424,6 +1431,17 @@ protected void onFoundPeersUpdated() {
         }
     }
 
+    // package-visible for testing
+    synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) {
+        this.localNodeCommissioned = localNodeCommissioned;
+        peerFinder.onNodeCommissionStatusChange(localNodeCommissioned);
+    }
+
+    // package-visible for testing
+    boolean localNodeCommissioned() {
+        return localNodeCommissioned;
+    }
+
     private void startElectionScheduler() {
         assert electionScheduler == null : electionScheduler;
 
@@ -1450,6 +1468,14 @@ public void run() {
                     return;
                 }
 
+                // if either the localNodeCommissioned flag or the last accepted state thinks it should skip pre voting, we will
+                // acknowledge it
+                if (nodeCommissioned(lastAcceptedState.nodes().getLocalNode(), lastAcceptedState.metadata()) == false
+                    || localNodeCommissioned == false) {
+                    logger.debug("skip prevoting as local node is decommissioned");
+                    return;
+                }
+
                 if (prevotingRound != null) {
                     prevotingRound.close();
                 }
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java
index 656e6d220720f..a66152b8016ee 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java
@@ -42,6 +42,7 @@
 import org.opensearch.cluster.ClusterStateTaskListener;
 import org.opensearch.cluster.NotClusterManagerException;
 import
org.opensearch.cluster.coordination.Coordinator.Mode; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; @@ -57,6 +58,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; @@ -78,6 +80,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -118,6 +121,7 @@ public class JoinHelper { private final AtomicReference lastFailedJoinAttempt = new AtomicReference<>(); private final Supplier joinTaskExecutorGenerator; + private final Consumer nodeCommissioned; JoinHelper( Settings settings, @@ -130,12 +134,14 @@ public class JoinHelper { Function joinLeaderInTerm, Collection> joinValidators, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + Consumer nodeCommissioned ) { this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); + this.nodeCommissioned = nodeCommissioned; this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { private final long term = currentTermSupplier.getAsLong(); @@ -342,6 +348,7 @@ public void handleResponse(Empty response) { pendingOutgoingJoins.remove(dedupKey); logger.debug("successfully joined {} with {}", destination, joinRequest); lastFailedJoinAttempt.set(null); + nodeCommissioned.accept(true); onCompletion.run(); } @@ -352,6 +359,13 @@ public void handleException(TransportException exp) { FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); attempt.logNow(); lastFailedJoinAttempt.set(attempt); + if (exp instanceof RemoteTransportException && (exp.getCause() instanceof NodeDecommissionedException)) { + logger.info( + "local node is decommissioned [{}]. 
Will not be able to join the cluster", + exp.getCause().getMessage() + ); + nodeCommissioned.accept(false); + } onCompletion.run(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 814aa17255931..ac237db85ee5b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -39,9 +39,6 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.decommission.DecommissionAttribute; -import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; -import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -64,6 +61,7 @@ import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; /** @@ -196,6 +194,9 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); + // we have added the same check in handleJoinRequest method and adding it here as this method + // would guarantee that a decommissioned node would never be able to join the cluster and ensures correctness + ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); @@ -203,7 +204,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (node.isClusterManagerNode()) { joiniedNodeNameIds.put(node.getName(), node.getId()); } - } catch (IllegalArgumentException | IllegalStateException e) { + } catch (IllegalArgumentException | IllegalStateException | NodeDecommissionedException e) { results.failure(joinTask, e); continue; } @@ -477,22 +478,13 @@ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version } public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) { - DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { - DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); - DecommissionStatus status = decommissionAttributeMetadata.status(); - if (decommissionAttribute != null && status != null) { - // We will let the node join the cluster if the current status is in FAILED state - if (node.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()) - && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { - throw new NodeDecommissionedException( - "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", - node.toString(), - decommissionAttribute.toString(), - 
status.status() - ); - } - } + if (nodeCommissioned(node, metadata) == false) { + throw new NodeDecommissionedException( + "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", + node.toString(), + metadata.decommissionAttributeMetadata().decommissionAttribute().toString(), + metadata.decommissionAttributeMetadata().status().status() + ); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index fcab411f073ba..5def2733b5ded 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -389,10 +389,6 @@ private Set filterNodesWithDecommissionAttribute( return nodesWithDecommissionAttribute; } - private static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { - return discoveryNode.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()); - } - private static void validateAwarenessAttribute( final DecommissionAttribute decommissionAttribute, List awarenessAttributes, @@ -531,4 +527,38 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } + + /** + * Utility method to check if the node has decommissioned attribute + * + * @param discoveryNode node to check on + * @param decommissionAttribute attribute to be checked with + * @return true or false based on whether node has decommissioned attribute + */ + public static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { + String nodeAttributeValue = discoveryNode.getAttributes().get(decommissionAttribute.attributeName()); + return nodeAttributeValue != null && nodeAttributeValue.equals(decommissionAttribute.attributeValue()); + } + + /** + * Utility method to check if the node is commissioned or not + * + * @param discoveryNode node to check on + * @param metadata metadata present current which will be used to check the commissioning status of the node + * @return if the node is commissioned or not + */ + public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata metadata) { + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null) { + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + DecommissionStatus status = decommissionAttributeMetadata.status(); + if (decommissionAttribute != null && status != null) { + if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) + && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { + return false; + } + } + } + return true; + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 1665614c18496..54579031aac08 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -534,6 +534,7 @@ public void apply(Settings value, Settings current, Settings previous) { PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, 
EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING, + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING, PeerFinder.DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING, ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING, ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index a601a6fbe4d82..e8b6c72c512a2 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -84,6 +84,14 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); + // the time between attempts to find all peers when node is in decommissioned state, default set to 2 minutes + public static final Setting DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING = Setting.timeSetting( + "discovery.find_peers_interval_during_decommission", + TimeValue.timeValueSeconds(120L), + TimeValue.timeValueMillis(1000), + Setting.Property.NodeScope + ); + public static final Setting DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING = Setting.timeSetting( "discovery.request_peers_timeout", TimeValue.timeValueMillis(3000), @@ -91,7 +99,8 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); - private final TimeValue findPeersInterval; + private final Settings settings; + private TimeValue findPeersInterval; private final TimeValue requestPeersTimeout; private final Object mutex = new Object(); @@ -112,6 +121,7 @@ public PeerFinder( TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver ) { + this.settings = settings; findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings); this.transportService = transportService; @@ -128,6 +138,23 @@ public PeerFinder( ); } + public synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + findPeersInterval = localNodeCommissioned + ? 
DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings) + : DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING.get(settings); + logger.info( + "setting findPeersInterval to [{}] as node commission status = [{}] for local node [{}]", + findPeersInterval, + localNodeCommissioned, + transportService.getLocalNode() + ); + } + + // package private for tests + TimeValue getFindPeersInterval() { + return findPeersInterval; + } + public void activate(final DiscoveryNodes lastAcceptedNodes) { logger.trace("activating with {}", lastAcceptedNodes); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index d96c972bc6021..74c5d0fcccbed 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -87,6 +87,7 @@ import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; +import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -1780,6 +1781,48 @@ public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { } } + public void testLocalNodeAlwaysCommissionedWithoutDecommissionedException() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + } + } + + public void testClusterStabilisesForPreviouslyDecommissionedNode() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + final ClusterNode leader = cluster.getAnyLeader(); + + ClusterNode decommissionedNode = cluster.new ClusterNode( + nextNodeIndex.getAndIncrement(), true, leader.nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info") + ); + decommissionedNode.coordinator.onNodeCommissionStatusChange(false); + cluster.clusterNodes.add(decommissionedNode); + + assertFalse(decommissionedNode.coordinator.localNodeCommissioned()); + + cluster.stabilise( + // Interval is updated to decommissioned find peer interval + defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING) + // One message delay to send a join + + DEFAULT_DELAY_VARIABILITY + // Commit a new cluster state with the new node(s). 
Might be split into multiple commits, and each might need a + // followup reconfiguration + + 3 * 2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY + ); + + // once cluster stabilises the node joins and would be commissioned + assertTrue(decommissionedNode.coordinator.localNodeCommissioned()); + } + } + private ClusterState buildNewClusterStateWithVotingConfigExclusion( ClusterState currentState, Set newVotingConfigExclusion diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index a3c945cdbac3a..7b21042b2ed4a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -90,7 +90,8 @@ public void testJoinDeduplication() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> new StatusInfo(HEALTHY, "info") + () -> new StatusInfo(HEALTHY, "info"), + nodeCommissioned -> {} ); transportService.start(); @@ -230,7 +231,8 @@ private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - null + null, + nodeCommissioned -> {} ); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); @@ -284,7 +286,8 @@ public void testJoinFailureOnUnhealthyNodes() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> nodeHealthServiceStatus.get() + () -> nodeHealthServiceStatus.get(), + nodeCommissioned -> {} ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 52922bc6a0e83..66a3b00f2979d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -261,6 +261,51 @@ public void testJoinClusterWithDifferentDecommission() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinFailedForDecommissionedNode() throws Exception { + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone1"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + final ClusterState clusterManagerClusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + + final DiscoveryNode 
decommissionedNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + String decommissionedNodeID = decommissionedNode.getId(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterManagerClusterState, + List.of(new JoinTaskExecutor.Task(decommissionedNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertFalse(taskResult.isSuccess()); + assertTrue(taskResult.getFailure() instanceof NodeDecommissionedException); + assertFalse(result.resultingState.getNodes().nodeExists(decommissionedNodeID)); + } + public void testJoinClusterWithDecommissionFailed() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index c77baba5fe167..18a7b892a424c 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -39,6 +39,10 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -775,6 +779,60 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { assertTrue(clusterStateHasNode(node1)); } + public void testJoinFailsWhenDecommissioned() { + DiscoveryNode node0 = newNode(0, true); + DiscoveryNode node1 = newNode(1, true); + long initialTerm = randomLongBetween(1, 10); + long initialVersion = randomLongBetween(1, 10); + setupFakeClusterManagerServiceAndCoordinator( + initialTerm, + initialStateWithDecommissionedAttribute( + initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), + new DecommissionAttribute("zone", "zone1") + ), + () -> new StatusInfo(HEALTHY, "healthy-info") + ); + assertFalse(isLocalNodeElectedMaster()); + long newTerm = initialTerm + randomLongBetween(1, 10); + joinNodeAndRun(new JoinRequest(node0, newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertFalse(clusterStateHasNode(node1)); + joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertTrue(clusterStateHasNode(node1)); + DiscoveryNode decommissionedNode = new DiscoveryNode( + "data_2", + 2 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long anotherTerm = newTerm + randomLongBetween(1, 10); + + assertThat( + expectThrows( + NodeDecommissionedException.class, + () -> 
joinNodeAndRun(new JoinRequest(decommissionedNode, anotherTerm, Optional.empty())) + ).getMessage(), + containsString("with current status of decommissioning") + ); + assertFalse(clusterStateHasNode(decommissionedNode)); + + DiscoveryNode node3 = new DiscoveryNode( + "data_3", + 3 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone2"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long termForNode3 = anotherTerm + randomLongBetween(1, 10); + + joinNodeAndRun(new JoinRequest(node3, termForNode3, Optional.empty())); + assertTrue(clusterStateHasNode(node3)); + } + private boolean isLocalNodeElectedMaster() { return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } @@ -782,4 +840,17 @@ private boolean isLocalNodeElectedMaster() { private boolean clusterStateHasNode(DiscoveryNode node) { return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } + + private static ClusterState initialStateWithDecommissionedAttribute( + ClusterState clusterState, + DecommissionAttribute decommissionAttribute + ) { + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + return ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + } } diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 5e7dede0309c6..7e7bb2f0a2911 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -807,6 +807,42 @@ public void testReconnectsToDisconnectedNodes() { assertFoundPeers(rebootedOtherNode); } + public void testConnectionAttemptDuringDecommissioning() { + boolean localNodeCommissioned = randomBoolean(); + peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + + long findPeersInterval = peerFinder.getFindPeersInterval().millis(); + + final DiscoveryNode otherNode = newDiscoveryNode("node-1"); + providedAddresses.add(otherNode.getAddress()); + transportAddressConnector.addReachableNode(otherNode); + + peerFinder.activate(lastAcceptedNodes); + runAllRunnableTasks(); + assertFoundPeers(otherNode); + + transportAddressConnector.reachableNodes.clear(); + final DiscoveryNode newNode = new DiscoveryNode("new-node", otherNode.getAddress(), Version.CURRENT); + transportAddressConnector.addReachableNode(newNode); + + connectedNodes.remove(otherNode); + disconnectedNodes.add(otherNode); + + // peer discovery will be delayed now + if (localNodeCommissioned == false) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + assertPeersNotDiscovered(newNode); + } + + final long expectedTime = CONNECTION_TIMEOUT_MILLIS + findPeersInterval; + while (deterministicTaskQueue.getCurrentTimeMillis() < expectedTime) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + } + assertFoundPeers(newNode); + } + private void respondToRequests(Function responseFactory) { final CapturedRequest[] capturedRequests = capturingTransport.getCapturedRequestsAndClear(); for (final CapturedRequest capturedRequest : capturedRequests) { @@ -828,6 +864,16 @@ private void assertFoundPeers(DiscoveryNode... 
expectedNodesArray) { assertNotifiedOfAllUpdates(); } + private void assertPeersNotDiscovered(DiscoveryNode... undiscoveredNodesArray) { + final Set undiscoveredNodes = Arrays.stream(undiscoveredNodesArray).collect(Collectors.toSet()); + final List actualNodesList = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false) + .collect(Collectors.toList()); + final HashSet actualNodesSet = new HashSet<>(actualNodesList); + Set intersection = new HashSet<>(actualNodesSet); + intersection.retainAll(undiscoveredNodes); + assertEquals(intersection.size(), 0); + } + private void assertNotifiedOfAllUpdates() { final Stream actualNodes = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false); final Stream notifiedNodes = StreamSupport.stream(foundPeersFromNotification.spliterator(), false); From fe3994ccdf62735907bc4834f93c71d97636d150 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 15:41:16 -0500 Subject: [PATCH 07/39] [Remove] LegacyESVersion.V_7_2_* and V_7_3_* constants (#4702) Removes all usages of LegacyESVersion.V_7_2_ and LegacyESVersion.V_7_3 version constants along with related ancient APIs. Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 3 +- .../common/CommonAnalysisModulePlugin.java | 45 ++++----- .../upgrades/FullClusterRestartIT.java | 8 +- .../org/opensearch/upgrades/RecoveryIT.java | 59 ++++-------- .../gateway/ReplicaShardAllocatorIT.java | 3 +- .../indices/IndicesLifecycleListenerIT.java | 11 +-- .../java/org/opensearch/LegacyESVersion.java | 5 - .../org/opensearch/OpenSearchException.java | 2 +- .../cluster/health/ClusterHealthRequest.java | 11 +-- .../TransportNodesHotThreadsAction.java | 4 +- .../node/info/TransportNodesInfoAction.java | 4 +- ...nsportNodesReloadSecureSettingsAction.java | 4 +- .../node/stats/TransportNodesStatsAction.java | 4 +- .../node/usage/TransportNodesUsageAction.java | 4 +- .../create/CreateSnapshotRequest.java | 9 +- .../status/TransportNodesSnapshotsStatus.java | 4 +- .../stats/TransportClusterStatsAction.java | 4 +- .../indices/alias/IndicesAliasesRequest.java | 12 +-- .../admin/indices/analyze/AnalyzeAction.java | 63 ++---------- .../indices/close/CloseIndexRequest.java | 11 +-- .../indices/close/CloseIndexResponse.java | 18 +--- ...TransportVerifyShardBeforeCloseAction.java | 11 +-- .../find/NodeFindDanglingIndexRequest.java | 4 +- .../list/NodeListDanglingIndicesRequest.java | 4 +- .../admin/indices/stats/CommonStatsFlags.java | 9 +- .../fieldcaps/FieldCapabilitiesRequest.java | 10 +- .../fieldcaps/FieldCapabilitiesResponse.java | 11 +-- .../action/search/GetAllPitNodeRequest.java | 4 +- .../action/support/nodes/BaseNodeRequest.java | 69 ------------- .../support/nodes/TransportNodesAction.java | 2 +- .../cluster/SnapshotsInProgress.java | 11 +-- .../cluster/metadata/IndexMetadata.java | 24 ++--- .../opensearch/cluster/metadata/Metadata.java | 19 +--- .../metadata/MetadataIndexStateService.java | 20 +--- .../cluster/node/DiscoveryNode.java | 96 ++++++------------- .../TransportNodesListGatewayMetaState.java | 4 +- ...ransportNodesListGatewayStartedShards.java | 4 +- .../index/engine/ReadOnlyEngine.java | 26 ++--- .../org/opensearch/index/get/GetResult.java | 25 +---- .../index/mapper/TextFieldMapper.java | 3 +- .../index/query/IntervalsSourceProvider.java | 11 +-- .../index/refresh/RefreshStats.java | 13 +-- .../index/seqno/ReplicationTracker.java | 2 +- .../recovery/RecoveryCleanFilesRequest.java | 12 +-- .../RecoveryTranslogOperationsRequest.java | 5 +- 
.../TransportNodesListShardStoreMetadata.java | 4 +- .../AutoDateHistogramAggregationBuilder.java | 25 +---- .../bucket/histogram/DateIntervalWrapper.java | 34 +------ .../metrics/InternalGeoCentroid.java | 17 +--- .../search/query/QuerySearchResult.java | 30 ------ .../search/sort/FieldSortBuilder.java | 9 +- .../opensearch/snapshots/SnapshotInfo.java | 15 +-- .../node/tasks/CancellableTasksTests.java | 4 +- .../node/tasks/ResourceAwareTasksTests.java | 4 +- .../node/tasks/TaskManagerTestCase.java | 6 +- .../cluster/node/tasks/TestTaskPlugin.java | 3 +- .../node/tasks/TransportTasksActionTests.java | 4 +- .../indices/close/CloseIndexRequestTests.java | 16 +--- .../nodes/TransportNodesActionTests.java | 3 +- .../termvectors/TermVectorsUnitTests.java | 4 +- 60 files changed, 212 insertions(+), 653 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 04b62c11a6865..7ee30dc84e9fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) ### Fixed @@ -160,4 +161,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 7cad01c5cc00a..b0850d0b9144d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -563,18 +563,22 @@ public List getPreConfiguredTokenFilters() { ) ) ); - filters.add(PreConfiguredTokenFilter.openSearchVersion("word_delimiter_graph", false, false, (input, version) -> { - boolean adjustOffsets = version.onOrAfter(LegacyESVersion.V_7_3_0); - return new WordDelimiterGraphFilter( - input, - adjustOffsets, - WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, - WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS - | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, - null - ); - })); + filters.add( + PreConfiguredTokenFilter.openSearchVersion( + "word_delimiter_graph", + false, + false, + (input, version) -> new WordDelimiterGraphFilter( + input, + true, + 
WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, + WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS + | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, + null + ) + ) + ); return filters; } @@ -588,12 +592,12 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edge_ngram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); - })); + tokenizers.add( + PreConfiguredTokenizer.openSearchVersion( + "edge_ngram", + (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) + ) + ); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -619,10 +623,7 @@ public List getPreConfiguredTokenizers() { + "Please change the tokenizer name to [edge_ngram] instead." ); } - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 0ed51b9d8a011..a3db7532c3a10 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -1003,12 +1003,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - if (getOldClusterVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 3d71f4a198aac..dbb18aec19fca 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -245,7 +245,6 @@ private String getNodeId(Predicate versionPredicate) throws IOException if (versionPredicate.test(version)) { return id; } - return id; } return null; } @@ -457,15 +456,10 @@ public void testRecoveryClosedIndex() throws Exception { closeIndex(indexName); } 
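// A hedged sketch (inferred from this patch, not stated in it) of why the version gate
// removed just below is dead code: closed-index replication shipped in 7.2, and with the
// V_7_2_*/V_7_3_* constants gone a 3.x node no longer interoperates with any index
// created before 7.2, so the onOrAfter check can only ever take its first branch.
//
//     // before: assertions forked on the index's creation version
//     if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) {
//         ensureGreen(indexName);              // closed index stays allocated and replicated
//         assertClosedIndex(indexName, true);
//     } else {
//         assertClosedIndex(indexName, false); // pre-7.2: closed indices were not replicated
//     }
//     // after: the surviving branch runs unconditionally
//     ensureGreen(indexName);
//     assertClosedIndex(indexName, true);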
- final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -491,14 +485,10 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index is created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index is created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -525,27 +515,20 @@ public void testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - if (minimumNodeVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - switch (CLUSTER_TYPE) { - case OLD: break; - case MIXED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); - break; - case UPGRADED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); - break; - } - } - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); + switch (CLUSTER_TYPE) { + case OLD: break; + case MIXED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); + break; + case UPGRADED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); + break; + } } /** * Returns the version in which the given index has been created diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java index db46fb4424848..6d05ecd0b56b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import org.opensearch.LegacyESVersion; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -512,7 +511,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { } /** - * If the recovery source is on an old node (before
{@link LegacyESVersion#V_7_2_0}) then the recovery target + * If the recovery source is on an old node (before {@code LegacyESVersion#V_7_2_0}
) then the recovery target * won't have the safe commit after phase1 because the recovery source does not send the global checkpoint in the clean_files * step. And if the recovery fails and retries, then the recovery stage might not transition properly. This test simulates * this behavior by changing the global checkpoint in phase1 to unassigned. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java index 72f28e94528ba..17a7d4c84b6fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java @@ -32,9 +32,7 @@ package org.opensearch.indices; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -246,13 +244,8 @@ public void testIndexStateShardChanged() throws Throwable { assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6)); assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1)); - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_2_0)) { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - } else { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED); - } + assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); + assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); } private static void assertShardStatesMatch( diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 1eb22a6bef3b5..283a6581a64bb 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,11 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_1 = new LegacyESVersion(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_2 = new LegacyESVersion(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java 
b/server/src/main/java/org/opensearch/OpenSearchException.java index 17ece23f819a2..e9f0d831d2977 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -601,7 +601,7 @@ public static void generateFailureXContent(XContentBuilder builder, Params param } t = t.getCause(); } - builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); + builder.field(ERROR, ExceptionsHelper.summaryMessage(e)); return; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index 1dedf481dec56..84a7616fe6b06 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.health; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -90,11 +89,7 @@ public ClusterHealthRequest(StreamInput in) throws IOException { waitForEvents = Priority.readFrom(in); } waitForNoInitializingShards = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - indicesOptions = IndicesOptions.lenientExpandOpen(); - } + indicesOptions = IndicesOptions.readIndicesOptions(in); } @Override @@ -122,9 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { Priority.writeTo(waitForEvents, out); } out.writeBoolean(waitForNoInitializingShards); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions.writeIndicesOptions(out); - } + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index e8429580ec887..4c71993251f4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -35,7 +35,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -43,6 +42,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.monitor.jvm.HotThreads; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -117,7 +117,7 @@ protected NodeHotThreads nodeOperation(NodeRequest request) { * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesHotThreadsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java 
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
index 7bcf83ba28111..ee7b287b878e7 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
@@ -34,7 +34,6 @@
 
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.TransportNodesAction;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
@@ -42,6 +41,7 @@
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.node.NodeService;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -126,7 +126,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
      *
      * @opensearch.internal
      */
-    public static class NodeInfoRequest extends BaseNodeRequest {
+    public static class NodeInfoRequest extends TransportRequest {
 
         NodesInfoRequest request;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java
index d7ad4357fa046..920c66bc5c543 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java
@@ -39,7 +39,6 @@
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.TransportNodesAction;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
@@ -54,6 +53,7 @@
 import org.opensearch.plugins.ReloadablePlugin;
 import org.opensearch.tasks.Task;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -188,7 +188,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque
      *
     * @opensearch.internal
     */
-    public static class NodeRequest extends BaseNodeRequest {
+    public static class NodeRequest extends TransportRequest {
 
         NodesReloadSecureSettingsRequest request;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
index 644c7f02d45f0..5d5d54c8fe7ed 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
@@ -34,7 +34,6 @@
 
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.TransportNodesAction;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
@@ -42,6 +41,7 @@
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.node.NodeService;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -127,7 +127,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
      *
      * @opensearch.internal
      */
-    public static class NodeStatsRequest extends BaseNodeRequest {
+    public static class NodeStatsRequest extends TransportRequest {
 
         NodesStatsRequest request;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java
index c7612f7e15838..dbd3673149efe 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java
@@ -34,7 +34,6 @@
 
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.TransportNodesAction;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
@@ -42,6 +41,7 @@
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.search.aggregations.support.AggregationUsageService;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 import org.opensearch.usage.UsageService;
 
@@ -117,7 +117,7 @@ protected NodeUsage nodeOperation(NodeUsageRequest nodeUsageRequest) {
      *
      * @opensearch.internal
      */
-    public static class NodeUsageRequest extends BaseNodeRequest {
+    public static class NodeUsageRequest extends TransportRequest {
 
         NodesUsageRequest request;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
index d78a4c95246b4..cb64718ed5843 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
@@ -60,7 +60,6 @@
 import static org.opensearch.common.settings.Settings.readSettingsFromStream;
 import static org.opensearch.common.settings.Settings.writeSettingsToStream;
 import static org.opensearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
-import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED;
 
 /**
  * Create snapshot request
@@ -124,9 +123,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException {
         includeGlobalState = in.readBoolean();
         waitForCompletion = in.readBoolean();
         partial = in.readBoolean();
-        if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) {
-            userMetadata = in.readMap();
-        }
+        userMetadata = in.readMap();
     }
 
     @Override
@@ -140,9 +137,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(includeGlobalState);
         out.writeBoolean(waitForCompletion);
         out.writeBoolean(partial);
-        if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) {
-            out.writeMap(userMetadata);
-        }
+        out.writeMap(userMetadata);
     }
 
     @Override
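The snapshot user metadata map is now written and read unconditionally. A minimal round trip of that writeMap/readMap pairing, using OpenSearch's BytesStreamOutput as a scratch buffer; the class name and map values below are made up for illustration:

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.common.io.stream.StreamInput;

    import java.io.IOException;
    import java.util.Map;

    public final class UserMetadataRoundTrip {
        public static void main(String[] args) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                // Write side: the map is always written, no version check.
                out.writeMap(Map.<String, Object>of("requested_by", "ops", "ticket", 4572));
                // Read side: always read, mirroring the write order exactly.
                try (StreamInput in = out.bytes().streamInput()) {
                    Map<String, Object> userMetadata = in.readMap();
                    System.out.println(userMetadata);
                }
            }
        }
    }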
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
index 86d0499a23f9e..e9bf564afaf32 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
@@ -36,7 +36,6 @@
 import org.opensearch.action.ActionType;
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.BaseNodeResponse;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.action.support.nodes.BaseNodesResponse;
@@ -51,6 +50,7 @@
 import org.opensearch.snapshots.Snapshot;
 import org.opensearch.snapshots.SnapshotShardsService;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -207,7 +207,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) th
      *
      * @opensearch.internal
      */
-    public static class NodeRequest extends BaseNodeRequest {
+    public static class NodeRequest extends TransportRequest {
 
         private final List<Snapshot> snapshots;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index a13932e137ab0..401813a6174fa 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -40,7 +40,6 @@
 import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
 import org.opensearch.action.admin.indices.stats.ShardStats;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.TransportNodesAction;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.health.ClusterHealthStatus;
@@ -57,6 +56,7 @@
 import org.opensearch.indices.IndicesService;
 import org.opensearch.node.NodeService;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 import org.opensearch.transport.Transports;
 
@@ -216,7 +216,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
      *
      * @opensearch.internal
      */
-    public static class ClusterStatsNodeRequest extends BaseNodeRequest {
+    public static class ClusterStatsNodeRequest extends TransportRequest {
 
         ClusterStatsRequest request;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
index eb2d2706a6531..bb28623430f2d 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -88,11 +88,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest
 tokens, DetailAnalyzeResponse detail) {
     }
 
     public Response(StreamInput in) throws IOException {
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new);
-            tokens = tokenArray != null ? Arrays.asList(tokenArray) : null;
-        } else {
-            int size = in.readVInt();
-            if (size > 0) {
-                tokens = new ArrayList<>(size);
-                for (int i = 0; i < size; i++) {
-                    tokens.add(new AnalyzeToken(in));
-                }
-            } else {
-                tokens = null;
-            }
-        }
+        AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new);
+        tokens = tokenArray != null ? Arrays.asList(tokenArray) : null;
         detail = in.readOptionalWriteable(DetailAnalyzeResponse::new);
     }
 
@@ -371,22 +358,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            AnalyzeToken[] tokenArray = null;
-            if (tokens != null) {
-                tokenArray = tokens.toArray(new AnalyzeToken[0]);
-            }
-            out.writeOptionalArray(tokenArray);
-        } else {
-            if (tokens != null) {
-                out.writeVInt(tokens.size());
-                for (AnalyzeToken token : tokens) {
-                    token.writeTo(out);
-                }
-            } else {
-                out.writeVInt(0);
-            }
+        AnalyzeToken[] tokenArray = null;
+        if (tokens != null) {
+            tokenArray = tokens.toArray(new AnalyzeToken[0]);
         }
+        out.writeOptionalArray(tokenArray);
         out.writeOptionalWriteable(detail);
     }
 
@@ -766,19 +742,7 @@ public AnalyzeTokenList(String name, AnalyzeToken[] tokens) {
 
     AnalyzeTokenList(StreamInput in) throws IOException {
         name = in.readString();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new);
-        } else {
-            int size = in.readVInt();
-            if (size > 0) {
-                tokens = new AnalyzeToken[size];
-                for (int i = 0; i < size; i++) {
-                    tokens[i] = new AnalyzeToken(in);
-                }
-            } else {
-                tokens = null;
-            }
-        }
+        tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new);
     }
 
     public String getName() {
@@ -811,18 +775,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            out.writeOptionalArray(tokens);
-        } else {
-            if (tokens != null) {
-                out.writeVInt(tokens.length);
-                for (AnalyzeToken token : tokens) {
-                    token.writeTo(out);
-                }
-            } else {
-                out.writeVInt(0);
-            }
-        }
+        out.writeOptionalArray(tokens);
     }
 }
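The hand-rolled size-prefixed loops above collapse into the writeOptionalArray/readOptionalArray pair, which frames the array with a presence flag. A sketch of that pattern with a minimal hypothetical Writeable element (Token stands in for AnalyzeToken; only the stream classes are real OpenSearch APIs):

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;
    import org.opensearch.common.io.stream.Writeable;

    import java.io.IOException;

    public final class OptionalArrayRoundTrip {
        // Hypothetical element type, invented for illustration.
        static final class Token implements Writeable {
            final String term;

            Token(String term) {
                this.term = term;
            }

            Token(StreamInput in) throws IOException {
                this.term = in.readString();
            }

            @Override
            public void writeTo(StreamOutput out) throws IOException {
                out.writeString(term);
            }
        }

        public static void main(String[] args) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                // A presence flag is written first, so null and non-null
                // arrays are both legal without manual size bookkeeping.
                out.writeOptionalArray(new Token[] { new Token("quick"), new Token("fox") });
                try (StreamInput in = out.bytes().streamInput()) {
                    Token[] tokens = in.readOptionalArray(Token::new, Token[]::new);
                    System.out.println(tokens == null ? "null" : tokens.length + " tokens");
                }
            }
        }
    }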
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java
index b16cabfda4d67..1095cec447442 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.admin.indices.close;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.ActiveShardCount;
@@ -61,11 +60,7 @@ public CloseIndexRequest(StreamInput in) throws IOException {
         super(in);
         indices = in.readStringArray();
         indicesOptions = IndicesOptions.readIndicesOptions(in);
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            waitForActiveShards = ActiveShardCount.readFrom(in);
-        } else {
-            waitForActiveShards = ActiveShardCount.NONE;
-        }
+        waitForActiveShards = ActiveShardCount.readFrom(in);
     }
 
     public CloseIndexRequest() {}
@@ -143,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeStringArray(indices);
         indicesOptions.writeIndicesOptions(out);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            waitForActiveShards.writeTo(out);
-        }
+        waitForActiveShards.writeTo(out);
     }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java
index 1fc9017359a8c..0388ea47bfc69 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.action.admin.indices.close;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.support.DefaultShardOperationFailedException;
 import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
@@ -49,7 +48,6 @@
 import java.util.List;
 import java.util.Objects;
 
-import static java.util.Collections.emptyList;
 import static java.util.Collections.unmodifiableList;
 
 /**
@@ -62,12 +60,8 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse {
     private final List<IndexResult> indices;
 
     CloseIndexResponse(StreamInput in) throws IOException {
-        super(in, in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0));
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            indices = unmodifiableList(in.readList(IndexResult::new));
-        } else {
-            indices = unmodifiableList(emptyList());
-        }
+        super(in, true);
+        indices = unmodifiableList(in.readList(IndexResult::new));
     }
 
     public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List<IndexResult> indices) {
@@ -82,12 +76,8 @@ public List getIndices() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            writeShardsAcknowledged(out);
-        }
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            out.writeList(indices);
-        }
+        writeShardsAcknowledged(out);
+        out.writeList(indices);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
index fe39e2a254301..691b2c7c95730 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
@@ -33,7 +33,6 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.indices.flush.FlushRequest;
 import org.opensearch.action.support.ActionFilters;
@@ -205,11 +204,7 @@ public static class ShardRequest extends ReplicationRequest {
         ShardRequest(StreamInput in) throws IOException {
             super(in);
             clusterBlock = new ClusterBlock(in);
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-                phase1 = in.readBoolean();
-            } else {
-                phase1 = false;
-            }
+            phase1 = in.readBoolean();
         }
 
         public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) {
@@ -228,9 +223,7 @@ public String toString() {
         public void writeTo(final StreamOutput out) throws IOException {
             super.writeTo(out);
             clusterBlock.writeTo(out);
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-                out.writeBoolean(phase1);
-            }
+            out.writeBoolean(phase1);
         }
 
         public ClusterBlock clusterBlock() {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java
index 6026dd10c607b..6885de74e4479 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java
@@ -34,16 +34,16 @@
 
 import java.io.IOException;
 
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.transport.TransportRequest;
 
 /**
  * Used when querying every node in the cluster for a specific dangling index.
 *
 * @opensearch.internal
 */
-public class NodeFindDanglingIndexRequest extends BaseNodeRequest {
+public class NodeFindDanglingIndexRequest extends TransportRequest {
     private final String indexUUID;
 
     public NodeFindDanglingIndexRequest(String indexUUID) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java
index 9b737fff8316e..696daf75942fb 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java
@@ -32,9 +32,9 @@
 
 package org.opensearch.action.admin.indices.dangling.list;
 
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.transport.TransportRequest;
 
 import java.io.IOException;
 
@@ -43,7 +43,7 @@
  *
  * @opensearch.internal
  */
-public class NodeListDanglingIndicesRequest extends BaseNodeRequest {
+public class NodeListDanglingIndicesRequest extends TransportRequest {
     /**
     * Filter the response by index UUID. Leave as null to find all indices.
     */
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
index 9a24d8a42dc9d..fd3d6daa9c393 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.admin.indices.stats;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.Strings;
 import org.opensearch.common.io.stream.StreamInput;
@@ -87,9 +86,7 @@ public CommonStatsFlags(StreamInput in) throws IOException {
         fieldDataFields = in.readStringArray();
         completionDataFields = in.readStringArray();
         includeSegmentFileSizes = in.readBoolean();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            includeUnloadedSegments = in.readBoolean();
-        }
+        includeUnloadedSegments = in.readBoolean();
         if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
             includeAllShardIndexingPressureTrackers = in.readBoolean();
             includeOnlyTopIndexingPressureMetrics = in.readBoolean();
@@ -111,9 +108,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeStringArrayNullable(fieldDataFields);
         out.writeStringArrayNullable(completionDataFields);
         out.writeBoolean(includeSegmentFileSizes);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeBoolean(includeUnloadedSegments);
-        }
+        out.writeBoolean(includeUnloadedSegments);
         if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
             out.writeBoolean(includeAllShardIndexingPressureTrackers);
             out.writeBoolean(includeOnlyTopIndexingPressureMetrics);
diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java
index 688568ba9a6d6..50d60560bdfe4 100644
--- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java
+++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java
@@ -72,11 +72,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException {
         indices = in.readStringArray();
         indicesOptions = IndicesOptions.readIndicesOptions(in);
         mergeResults = in.readBoolean();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            includeUnmapped = in.readBoolean();
-        } else {
-            includeUnmapped = false;
-        }
+        includeUnmapped = in.readBoolean();
         indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null;
         nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalLong() : null;
     }
@@ -109,9 +105,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeStringArray(indices);
         indicesOptions.writeIndicesOptions(out);
         out.writeBoolean(mergeResults);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeBoolean(includeUnmapped);
-        }
+        out.writeBoolean(includeUnmapped);
         if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) {
             out.writeOptionalNamedWriteable(indexFilter);
             out.writeOptionalLong(nowInMillis);
diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java
index e5f644987182c..847cca25ceb35 100644
--- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java
+++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.fieldcaps;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.ActionResponse;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.Strings;
@@ -87,11 +86,7 @@ private FieldCapabilitiesResponse(
 
     public FieldCapabilitiesResponse(StreamInput in) throws IOException {
         super(in);
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            indices = in.readStringArray();
-        } else {
-            indices = Strings.EMPTY_ARRAY;
-        }
+        indices = in.readStringArray();
         this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField);
         indexResponses = in.readList(FieldCapabilitiesIndexResponse::new);
     }
@@ -138,9 +133,7 @@ private static Map readField(StreamInput in) throws I
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeStringArray(indices);
-        }
+        out.writeStringArray(indices);
         out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
         out.writeList(indexResponses);
     }
diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java
index c90f75e3c0aed..de0c0dd9bbfc3 100644
--- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java
@@ -9,16 +9,16 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.transport.TransportRequest;
 
 import java.io.IOException;
 
 /**
  * Inner node get all pits request
 */
-public class GetAllPitNodeRequest extends BaseNodeRequest {
+public class GetAllPitNodeRequest extends TransportRequest {
 
     public GetAllPitNodeRequest() {
         super();
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java
deleted file mode 100644
index b5ff1d60ff75b..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.nodes;
-
-import org.opensearch.LegacyESVersion;
-import org.opensearch.common.io.stream.StreamInput;
-import org.opensearch.common.io.stream.StreamOutput;
-import org.opensearch.transport.TransportRequest;
-
-import java.io.IOException;
-
-/**
- * Base class for node transport requests
- *
- * @opensearch.internal
- *
- * @deprecated this class is deprecated and classes will extend TransportRequest directly
- */
-// TODO: this class can be removed in main once 7.x is bumped to 7.4.0
-@Deprecated
-public abstract class BaseNodeRequest extends TransportRequest {
-
-    public BaseNodeRequest() {}
-
-    public BaseNodeRequest(StreamInput in) throws IOException {
-        super(in);
-        if (in.getVersion().before(LegacyESVersion.V_7_3_0)) {
-            in.readString(); // previously nodeId
-        }
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        if (out.getVersion().before(LegacyESVersion.V_7_3_0)) {
-            out.writeString(""); // previously nodeId
-        }
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java
index 18fcdfad0bcc4..a12e9b753599d 100644
--- a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java
+++ b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java
@@ -70,7 +70,7 @@ public abstract class TransportNodesAction<
     NodesRequest extends BaseNodesRequest<NodesRequest>,
     NodesResponse extends BaseNodesResponse,
-    NodeRequest extends BaseNodeRequest,
+    NodeRequest extends TransportRequest,
     NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {
 
     protected final ThreadPool threadPool;
diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java
index 0ff373b6116de..4cbf0cfe70adb 100644
--- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java
+++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java
@@ -67,7 +67,6 @@
 import java.util.stream.Collectors;
 
 import static org.opensearch.snapshots.SnapshotInfo.DATA_STREAMS_IN_SNAPSHOT;
-import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED;
 
 /**
  * Meta data about snapshots that are currently executing
@@ -296,11 +295,7 @@ private Entry(StreamInput in) throws IOException {
         shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom);
         repositoryStateId = in.readLong();
         failure = in.readOptionalString();
-        if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) {
-            userMetadata = in.readMap();
-        } else {
-            userMetadata = null;
-        }
+        userMetadata = in.readMap();
         if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) {
             version = Version.readVersion(in);
         } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) {
@@ -736,9 +731,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeMap(shards);
         out.writeLong(repositoryStateId);
         out.writeOptionalString(failure);
-        if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) {
-            out.writeMap(userMetadata);
-        }
+        out.writeMap(userMetadata);
         if (out.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) {
             Version.writeVersion(version, out);
         } else if (out.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index cd1c92a8b109f..b6ca8c52cd818 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -1016,11 +1016,7 @@ private static class IndexMetadataDiff implements Diff {
             version = in.readLong();
             mappingVersion = in.readVLong();
             settingsVersion = in.readVLong();
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-                aliasesVersion = in.readVLong();
-            } else {
-                aliasesVersion = 1;
-            }
+            aliasesVersion = in.readVLong();
             state = State.fromId(in.readByte());
             settings = Settings.readSettingsFromStream(in);
             primaryTerms = in.readVLongArray();
@@ -1051,9 +1047,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeLong(version);
             out.writeVLong(mappingVersion);
             out.writeVLong(settingsVersion);
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-                out.writeVLong(aliasesVersion);
-            }
+            out.writeVLong(aliasesVersion);
             out.writeByte(state.id);
             Settings.writeSettingsToStream(settings, out);
             out.writeVLongArray(primaryTerms);
@@ -1093,11 +1087,7 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException {
         builder.version(in.readLong());
         builder.mappingVersion(in.readVLong());
         builder.settingsVersion(in.readVLong());
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            builder.aliasesVersion(in.readVLong());
-        } else {
-            builder.aliasesVersion(1);
-        }
+        builder.aliasesVersion(in.readVLong());
         builder.setRoutingNumShards(in.readInt());
         builder.state(State.fromId(in.readByte()));
         builder.settings(readSettingsFromStream(in));
@@ -1140,9 +1130,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(version);
         out.writeVLong(mappingVersion);
         out.writeVLong(settingsVersion);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeVLong(aliasesVersion);
-        }
+        out.writeVLong(aliasesVersion);
         out.writeInt(routingNumShards);
         out.writeByte(state.id());
         writeSettingsToStream(settings, out);
@@ -1821,8 +1809,8 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti
         if (Assertions.ENABLED) {
             assert settingsVersion : "settings version should be present for indices";
         }
-        if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(LegacyESVersion.V_7_2_0)) {
-            assert aliasesVersion : "aliases version should be present for indices created on or after 7.2.0";
+        if (Assertions.ENABLED) {
+            assert aliasesVersion : "aliases version should be present for indices";
         }
         return builder.build();
     }
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
index fb8123f20e904..8e73a72d43219 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
@@ -39,7 +39,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
 import org.apache.lucene.util.CollectionUtil;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.AliasesRequest;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterState.FeatureAware;
@@ -986,11 +985,7 @@ private static class MetadataDiff implements Diff {
             coordinationMetadata = new CoordinationMetadata(in);
             transientSettings = Settings.readSettingsFromStream(in);
             persistentSettings = Settings.readSettingsFromStream(in);
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-                hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in);
-            } else {
-                hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY;
-            }
+            hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in);
             indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER);
             templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER);
             customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
@@ -1004,9 +999,7 @@ public void writeTo(StreamOutput out) throws IOException {
             coordinationMetadata.writeTo(out);
             Settings.writeSettingsToStream(transientSettings, out);
             Settings.writeSettingsToStream(persistentSettings, out);
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-                hashesOfConsistentSettings.writeTo(out);
-            }
+            hashesOfConsistentSettings.writeTo(out);
             indices.writeTo(out);
             templates.writeTo(out);
             customs.writeTo(out);
@@ -1037,9 +1030,7 @@ public static Metadata readFrom(StreamInput in) throws IOException {
         builder.coordinationMetadata(new CoordinationMetadata(in));
         builder.transientSettings(readSettingsFromStream(in));
         builder.persistentSettings(readSettingsFromStream(in));
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in));
-        }
+        builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in));
         int size = in.readVInt();
         for (int i = 0; i < size; i++) {
             builder.put(IndexMetadata.readFrom(in), false);
@@ -1064,9 +1055,7 @@ public void writeTo(StreamOutput out) throws IOException {
         coordinationMetadata.writeTo(out);
         writeSettingsToStream(transientSettings, out);
         writeSettingsToStream(persistentSettings, out);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            hashesOfConsistentSettings.writeTo(out);
-        }
+        hashesOfConsistentSettings.writeTo(out);
         out.writeVInt(indices.size());
         for (IndexMetadata indexMetadata : this) {
             indexMetadata.writeTo(out);
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
index 4f1000e3407fd..6de69c5a6f8f4 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
@@ -36,7 +36,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.action.ActionListener;
@@ -840,10 +839,6 @@ static Tuple> closeRoutingTable(
         final Map verifyResult
     ) {
-        // Remove the index routing table of closed indices if the cluster is in a mixed version
-        // that does not support the replication of closed indices
-        final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(LegacyESVersion.V_7_2_0);
-
         final Metadata.Builder metadata = Metadata.builder(currentState.metadata());
         final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
         final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
@@ -916,16 +911,11 @@ static Tuple> closeRoutingTable(
                 blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID);
                 blocks.addIndexBlock(index.getName(), INDEX_CLOSED_BLOCK);
                 final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE);
-                if (removeRoutingTable) {
-                    metadata.put(updatedMetadata);
-                    routingTable.remove(index.getName());
-                } else {
-                    metadata.put(
-                        updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1)
-                            .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true))
-                    );
-                    routingTable.addAsFromOpenToClose(metadata.getSafe(index));
-                }
+                metadata.put(
+                    updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1)
+                        .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true))
+                );
+                routingTable.addAsFromOpenToClose(metadata.getSafe(index));
 
                 logger.debug("closing index {} succeeded", index);
                 closedIndices.add(index.getName());
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
index 199d79070c050..016c0aac44be6 100644
--- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
+++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
@@ -49,7 +49,6 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
@@ -323,50 +322,30 @@ public DiscoveryNode(StreamInput in) throws IOException {
         }
         int rolesSize = in.readVInt();
         final Set<DiscoveryNodeRole> roles = new HashSet<>(rolesSize);
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            for (int i = 0; i < rolesSize; i++) {
-                final String roleName = in.readString();
-                final String roleNameAbbreviation = in.readString();
-                final boolean canContainData;
-                if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
-                    canContainData = in.readBoolean();
-                } else {
-                    canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName());
-                }
-                final DiscoveryNodeRole role = roleMap.get(roleName);
-                if (role == null) {
-                    if (in.getVersion().onOrAfter(Version.V_2_1_0)) {
-                        roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData));
-                    } else {
-                        roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData));
-                    }
-                } else {
-                    assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]";
-                    assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation ["
-                        + roleName
-                        + "] does not match role ["
-                        + role.roleNameAbbreviation()
-                        + "]";
-                    roles.add(role);
-                }
+        for (int i = 0; i < rolesSize; i++) {
+            final String roleName = in.readString();
+            final String roleNameAbbreviation = in.readString();
+            final boolean canContainData;
+            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
+                canContainData = in.readBoolean();
+            } else {
+                canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName());
             }
-        } else {
-            // an old node will only send us legacy roles since pluggable roles is a new concept
-            for (int i = 0; i < rolesSize; i++) {
-                final LegacyRole legacyRole = in.readEnum(LegacyRole.class);
-                switch (legacyRole) {
-                    case MASTER:
-                        roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
-                        break;
-                    case DATA:
-                        roles.add(DiscoveryNodeRole.DATA_ROLE);
-                        break;
-                    case INGEST:
-                        roles.add(DiscoveryNodeRole.INGEST_ROLE);
-                        break;
-                    default:
-                        throw new AssertionError(legacyRole.roleName());
+            final DiscoveryNodeRole role = roleMap.get(roleName);
+            if (role == null) {
+                if (in.getVersion().onOrAfter(Version.V_2_1_0)) {
+                    roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData));
+                } else {
+                    roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData));
                 }
+            } else {
+                assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]";
+                assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation ["
+                    + roleName
+                    + "] does not match role ["
+                    + role.roleNameAbbreviation()
+                    + "]";
+                roles.add(role);
             }
         }
         this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles));
@@ -386,30 +365,13 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeString(entry.getKey());
             out.writeString(entry.getValue());
         }
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            out.writeVInt(roles.size());
-            for (final DiscoveryNodeRole role : roles) {
-                final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion());
-                out.writeString(compatibleRole.roleName());
-                out.writeString(compatibleRole.roleNameAbbreviation());
-                if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
-                    out.writeBoolean(compatibleRole.canContainData());
-                }
-            }
-        } else {
-            // an old node will only understand legacy roles since pluggable roles is a new concept
-            final List<DiscoveryNodeRole> rolesToWrite = roles.stream()
-                .filter(DiscoveryNodeRole.LEGACY_ROLES::contains)
-                .collect(Collectors.toList());
-            out.writeVInt(rolesToWrite.size());
-            for (final DiscoveryNodeRole role : rolesToWrite) {
-                if (role.isClusterManager()) {
-                    out.writeEnum(LegacyRole.MASTER);
-                } else if (role.equals(DiscoveryNodeRole.DATA_ROLE)) {
-                    out.writeEnum(LegacyRole.DATA);
-                } else if (role.equals(DiscoveryNodeRole.INGEST_ROLE)) {
-                    out.writeEnum(LegacyRole.INGEST);
-                }
+        out.writeVInt(roles.size());
+        for (final DiscoveryNodeRole role : roles) {
+            final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion());
+            out.writeString(compatibleRole.roleName());
+            out.writeString(compatibleRole.roleNameAbbreviation());
+            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
+                out.writeBoolean(compatibleRole.canContainData());
             }
         }
         if (out.getVersion().before(Version.V_1_0_0) && version.onOrAfter(Version.V_1_0_0)) {
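With the legacy-enum branch gone, every role crosses the wire as the same (roleName, roleNameAbbreviation, canContainData) triple. A stripped-down sketch of just that framing; the values are invented, and the real reader additionally consults roleMap and the wire version as shown above:

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.common.io.stream.StreamInput;

    import java.io.IOException;

    public final class RoleWireFormat {
        public static void main(String[] args) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.writeVInt(1);          // number of roles
                out.writeString("data");   // roleName
                out.writeString("d");      // roleNameAbbreviation
                out.writeBoolean(true);    // canContainData (7.10+ wire format)
                try (StreamInput in = out.bytes().streamInput()) {
                    int rolesSize = in.readVInt();
                    for (int i = 0; i < rolesSize; i++) {
                        String roleName = in.readString();
                        String abbreviation = in.readString();
                        boolean canContainData = in.readBoolean();
                        System.out.printf("%s(%s) data=%b%n", roleName, abbreviation, canContainData);
                    }
                }
            }
        }
    }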
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java
index be914c6a40a83..ba1490a7929bd 100644
--- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java
@@ -37,7 +37,6 @@
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.BaseNodeResponse;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.action.support.nodes.BaseNodesResponse;
@@ -52,6 +51,7 @@
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -168,7 +168,7 @@ protected void writeNodesTo(StreamOutput out, List nodes)
      *
      * @opensearch.internal
      */
-    public static class NodeRequest extends BaseNodeRequest {
+    public static class NodeRequest extends TransportRequest {
 
         NodeRequest() {}
 
         NodeRequest(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java
index c43f539243d7a..5b79ca5970e63 100644
--- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java
@@ -40,7 +40,6 @@
 import org.opensearch.action.ActionType;
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.BaseNodeResponse;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.action.support.nodes.BaseNodesResponse;
@@ -65,6 +64,7 @@
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -307,7 +307,7 @@ protected void writeNodesTo(StreamOutput out, List nod
      *
      * @opensearch.internal
      */
-    public static class NodeRequest extends BaseNodeRequest {
+    public static class NodeRequest extends TransportRequest {
 
         private final ShardId shardId;
         @Nullable
diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java
index f426768119c1d..dceb26bc33aa7 100644
--- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java
@@ -39,8 +39,6 @@
 import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
-import org.opensearch.LegacyESVersion;
-import org.opensearch.Version;
 import org.opensearch.common.concurrent.GatedCloseable;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
@@ -174,25 +172,21 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat
         if (requireCompleteHistory == false) {
             return;
         }
-        // Before 8.0 the global checkpoint is not known and up to date when the engine is created after
+        // Before 3.0 the global checkpoint is not known and up to date when the engine is created after
         // peer recovery, so we only check the max seq no / global checkpoint coherency when the global
         // checkpoint is different from the unassigned sequence number value.
         // In addition to that we only execute the check if the index the engine belongs to has been
         // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
         // that guarantee that all operations have been flushed to Lucene.
-        final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
-        if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)
-            || (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) {
-            assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint());
-            if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
-                throw new IllegalStateException(
-                    "Maximum sequence number ["
-                        + seqNoStats.getMaxSeqNo()
-                        + "] from last commit does not match global checkpoint ["
-                        + seqNoStats.getGlobalCheckpoint()
-                        + "]"
-                );
-            }
+        assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint());
+        if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
+            throw new IllegalStateException(
+                "Maximum sequence number ["
+                    + seqNoStats.getMaxSeqNo()
+                    + "] from last commit does not match global checkpoint ["
+                    + seqNoStats.getGlobalCheckpoint()
+                    + "]"
+            );
         }
     }
diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java
index e2f23353f250e..5da4f8d5c7833 100644
--- a/server/src/main/java/org/opensearch/index/get/GetResult.java
+++ b/server/src/main/java/org/opensearch/index/get/GetResult.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.get;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.Version;
 import org.opensearch.common.Strings;
@@ -105,20 +104,8 @@ public GetResult(StreamInput in) throws IOException {
             if (source.length() == 0) {
                 source = null;
             }
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-                documentFields = readFields(in);
-                metaFields = readFields(in);
-            } else {
-                Map<String, DocumentField> fields = readFields(in);
-                documentFields = new HashMap<>();
-                metaFields = new HashMap<>();
-                fields.forEach(
-                    (fieldName, docField) -> (MapperService.META_FIELDS_BEFORE_7DOT8.contains(fieldName) ? metaFields : documentFields).put(
-                        fieldName,
-                        docField
-                    )
-                );
-            }
+            documentFields = readFields(in);
+            metaFields = readFields(in);
         } else {
             metaFields = Collections.emptyMap();
             documentFields = Collections.emptyMap();
@@ -446,12 +433,8 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(exists);
         if (exists) {
             out.writeBytesReference(source);
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-                writeFields(out, documentFields);
-                writeFields(out, metaFields);
-            } else {
-                writeFields(out, this.getFields());
-            }
+            writeFields(out, documentFields);
+            writeFields(out, metaFields);
         }
     }
diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java
index 82bd8ebb4d362..15aae1774213a 100644
--- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java
@@ -69,7 +69,6 @@
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.Operations;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.collect.Iterators;
 import org.opensearch.common.lucene.Lucene;
@@ -422,7 +421,7 @@ private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fi
          * or a multi-field). This way search will continue to work on old indices and new indices
         * will use the expected full name.
         */
-        String fullName = indexCreatedVersion.before(LegacyESVersion.V_7_2_1) ? name() : buildFullName(context);
+        String fullName = buildFullName(context);
         // Copy the index options of the main field to allow phrase queries on
         // the prefix field.
         FieldType pft = new FieldType(fieldType);
diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java
index cc69a44c9c80c..e1cb0cdff4ebd 100644
--- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java
+++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java
@@ -41,7 +41,6 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.ParsingException;
@@ -166,11 +165,7 @@ public Match(StreamInput in) throws IOException {
         }
         this.analyzer = in.readOptionalString();
         this.filter = in.readOptionalWriteable(IntervalFilter::new);
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            this.useField = in.readOptionalString();
-        } else {
-            this.useField = null;
-        }
+        this.useField = in.readOptionalString();
     }
 
     @Override
@@ -234,9 +229,7 @@ public void writeTo(StreamOutput out) throws IOException {
         }
         out.writeOptionalString(analyzer);
         out.writeOptionalWriteable(filter);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeOptionalString(useField);
-        }
+        out.writeOptionalString(useField);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java
index 8a54e5105c61e..fc874608fb3b1 100644
--- a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java
+++ b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.refresh;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.common.io.stream.Writeable;
@@ -68,10 +67,8 @@ public RefreshStats() {}
     public RefreshStats(StreamInput in) throws IOException {
         total = in.readVLong();
         totalTimeInMillis = in.readVLong();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            externalTotal = in.readVLong();
-            externalTotalTimeInMillis = in.readVLong();
-        }
+        externalTotal = in.readVLong();
+        externalTotalTimeInMillis = in.readVLong();
         listeners = in.readVInt();
     }
 
@@ -79,10 +76,8 @@ public RefreshStats(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(total);
         out.writeVLong(totalTimeInMillis);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeVLong(externalTotal);
-            out.writeVLong(externalTotalTimeInMillis);
-        }
+        out.writeVLong(externalTotal);
+        out.writeVLong(externalTotalTimeInMillis);
         out.writeVInt(listeners);
     }
diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
index 55d95381923b3..63995ae121abb 100644
--- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
+++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
@@ -1495,7 +1495,7 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex
         assert primaryMode == false;
         if (primaryContext.checkpoints.containsKey(shardAllocationId) == false) {
             // can happen if the old primary was on an old version
-            assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_3_0);
+            assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.fromId(7000099));
             throw new IllegalStateException("primary context [" + primaryContext + "] does not contain " + shardAllocationId);
         }
         final Runnable runAfter = getClusterManagerUpdateOperationFromCurrentState();
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java
index 6c597fcd086c4..d346ec5c975f4 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java
@@ -32,10 +32,8 @@
 
 package org.opensearch.indices.recovery;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
-import org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.shard.ShardId;
 import org.opensearch.index.store.Store;
 
@@ -76,11 +74,7 @@ public RecoveryCleanFilesRequest(
         shardId = new ShardId(in);
         snapshotFiles = new Store.MetadataSnapshot(in);
         totalTranslogOps = in.readVInt();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            globalCheckpoint = in.readZLong();
-        } else {
-            globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;
-        }
+        globalCheckpoint = in.readZLong();
    }
 
    @Override
@@ -90,9 +84,7 @@ public void writeTo(StreamOutput out) throws IOException {
         shardId.writeTo(out);
         snapshotFiles.writeTo(out);
         out.writeVInt(totalTranslogOps);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeZLong(globalCheckpoint);
-        }
+        out.writeZLong(globalCheckpoint);
     }
 
     public Store.MetadataSnapshot sourceMetaSnapshot() {
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java
index eacfb87ecc732..32560bc211669 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.indices.recovery;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.index.seqno.RetentionLeases;
@@ -139,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeZLong(maxSeenAutoIdTimestampOnPrimary);
         out.writeZLong(maxSeqNoOfUpdatesOrDeletesOnPrimary);
         retentionLeases.writeTo(out);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-            out.writeVLong(mappingVersionOnPrimary);
-        }
+        out.writeVLong(mappingVersionOnPrimary);
     }
 }
diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java
index b49cdcd127962..0e452fcbbc7a9 100644
--- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java
+++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java
@@ -39,7 +39,6 @@
 import org.opensearch.action.ActionType;
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.BaseNodeResponse;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.action.support.nodes.BaseNodesResponse;
@@ -68,6 +67,7 @@
 import org.opensearch.index.store.StoreFileMetadata;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
@@ -410,7 +410,7 @@ protected void writeNodesTo(StreamOutput out, List nodes
      *
      * @opensearch.internal
     */
-    public static class NodeRequest extends BaseNodeRequest {
+    public static class NodeRequest extends TransportRequest {
 
         private final ShardId shardId;
         @Nullable
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
index 6b6d33717bab4..f7e3de4eb988f 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.aggregations.bucket.histogram;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.Rounding;
 import org.opensearch.common.io.stream.StreamInput;
@@ -150,17 +149,13 @@ public AutoDateHistogramAggregationBuilder(String name) {
     public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException {
         super(in);
         numBuckets = in.readVInt();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            minimumIntervalExpression = in.readOptionalString();
-        }
+        minimumIntervalExpression = in.readOptionalString();
     }
 
     @Override
     protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeVInt(numBuckets);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            out.writeOptionalString(minimumIntervalExpression);
-        }
+        out.writeOptionalString(minimumIntervalExpression);
     }
 
     protected AutoDateHistogramAggregationBuilder(
@@ -321,17 +316,7 @@ public RoundingInfo(StreamInput in) throws IOException {
         roughEstimateDurationMillis = in.readVLong();
         innerIntervals = in.readIntArray();
         unitAbbreviation = in.readString();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            dateTimeUnit = in.readString();
-        } else {
-            /*
-             * This *should* be safe because we only deserialize RoundingInfo
-             * when reading result and results don't actually use this at all.
-             * We just set it to something non-null to line up with the normal
-             * ctor. "seconds" is the smallest unit anyway.
-             */
-            dateTimeUnit = "second";
-        }
+        dateTimeUnit = in.readString();
     }
 
     @Override
@@ -340,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(roughEstimateDurationMillis);
         out.writeIntArray(innerIntervals);
         out.writeString(unitAbbreviation);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) {
-            out.writeString(dateTimeUnit);
-        }
+        out.writeString(dateTimeUnit);
     }
 
     public int getMaximumInnerInterval() {
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java
index d378bb2ec1bd2..f8a4c4ffd9cba 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.search.aggregations.bucket.histogram;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.Rounding;
 import org.opensearch.common.Rounding.DateTimeUnit;
@@ -143,21 +142,8 @@ public static void declareIntervalFields(Object
     public DateIntervalWrapper() {}
 
     public DateIntervalWrapper(StreamInput in) throws IOException {
-        if (in.getVersion().before(LegacyESVersion.V_7_2_0)) {
-            long interval = in.readLong();
-            DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new);
-
-            if (histoInterval != null) {
-                dateHistogramInterval = histoInterval;
-                intervalType = IntervalTypeEnum.LEGACY_DATE_HISTO;
-            } else {
-                dateHistogramInterval = new DateHistogramInterval(interval + "ms");
-                intervalType = IntervalTypeEnum.LEGACY_INTERVAL;
-            }
-        } else {
-            dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new);
-            intervalType = IntervalTypeEnum.fromStream(in);
-        }
+        dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new);
+        intervalType = IntervalTypeEnum.fromStream(in);
     }
 
     public IntervalTypeEnum getIntervalType() {
@@ -402,20 +388,8 @@ public boolean isEmpty() {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().before(LegacyESVersion.V_7_2_0)) {
-            if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) {
-                out.writeLong(
-                    TimeValue.parseTimeValue(dateHistogramInterval.toString(), DateHistogramAggregationBuilder.NAME + ".innerWriteTo")
-                        .getMillis()
-                );
-            } else {
-                out.writeLong(0L);
-            }
-            out.writeOptionalWriteable(dateHistogramInterval);
-        } else {
-            out.writeOptionalWriteable(dateHistogramInterval);
-            intervalType.writeTo(out);
-        }
+        out.writeOptionalWriteable(dateHistogramInterval);
+        intervalType.writeTo(out);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java
index 96e7541de25d9..35449a28c9087 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java
@@ -33,7 +33,6 @@
 package org.opensearch.search.aggregations.metrics;
 
 import org.apache.lucene.geo.GeoEncodingUtils;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.io.stream.StreamInput;
@@ -84,13 +83,7 @@ public InternalGeoCentroid(StreamInput in) throws IOException {
         super(in);
         count = in.readVLong();
         if (in.readBoolean()) {
-            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-                centroid = new GeoPoint(in.readDouble(), in.readDouble());
-            } else {
-                final long hash = in.readLong();
-                centroid = new GeoPoint(decodeLatitude(hash), decodeLongitude(hash));
-            }
-
+            centroid = new GeoPoint(in.readDouble(), in.readDouble());
         } else {
             centroid = null;
         }
@@ -101,12 +94,8 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeVLong(count);
         if (centroid != null) {
             out.writeBoolean(true);
-            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) {
-                out.writeDouble(centroid.lat());
-                out.writeDouble(centroid.lon());
-            } else {
-                out.writeLong(encodeLatLon(centroid.lat(), centroid.lon()));
-            }
+            out.writeDouble(centroid.lat());
+            out.writeDouble(centroid.lon());
         } else {
             out.writeBoolean(false);
         }
diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java
index dceacd57d623e..31fdc5c9d9e9d 100644
--- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java
+++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java
@@ -45,7 +45,6 @@
 import org.opensearch.search.SearchShardTarget;
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.InternalAggregations;
-import org.opensearch.search.aggregations.pipeline.PipelineAggregator;
 import org.opensearch.search.internal.ShardSearchContextId;
 import org.opensearch.search.internal.ShardSearchRequest;
 import org.opensearch.search.profile.NetworkTime;
@@ -54,7 +53,6 @@
 
 import java.io.IOException;
 
-import static java.util.Collections.emptyList;
 import static org.opensearch.common.lucene.Lucene.readTopDocs;
 import static org.opensearch.common.lucene.Lucene.writeTopDocs;
 
@@ -361,10 +359,6 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc
             if (hasAggs = in.readBoolean()) {
                 aggregations = DelayableWriteable.referencing(InternalAggregations.readFrom(in));
             }
-            if (in.getVersion().before(LegacyESVersion.V_7_2_0)) {
-                // The list of PipelineAggregators is sent by old versions. We don't need it anyway.
-                in.readNamedWriteableList(PipelineAggregator.class);
-            }
         } else {
             if (hasAggs = in.readBoolean()) {
                 aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in);
@@ -410,35 +404,11 @@ public void writeToNoId(StreamOutput out) throws IOException {
         writeTopDocs(out, topDocsAndMaxScore);
         if (aggregations == null) {
             out.writeBoolean(false);
-            if (out.getVersion().before(LegacyESVersion.V_7_2_0)) {
-                /*
-                 * Earlier versions expect sibling pipeline aggs separately
-                 * as they used to be set to QuerySearchResult directly, while
-                 * later versions expect them in InternalAggregations. Note
-                 * that despite serializing sibling pipeline aggs as part of
-                 * InternalAggregations is supported since 6.7.0, the shards
-                 * set sibling pipeline aggs to InternalAggregations only from
-                 * 7.1 on.
-                 */
-                out.writeNamedWriteableList(emptyList());
-            }
         } else {
             out.writeBoolean(true);
             if (out.getVersion().before(LegacyESVersion.V_7_7_0)) {
                 InternalAggregations aggs = aggregations.expand();
                 aggs.writeTo(out);
-                if (out.getVersion().before(LegacyESVersion.V_7_2_0)) {
-                    /*
-                     * Earlier versions expect sibling pipeline aggs separately
-                     * as they used to be set to QuerySearchResult directly, while
-                     * later versions expect them in InternalAggregations.
Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. - */ - out.writeNamedWriteableList(aggs.getTopLevelPipelineAggregators()); - } } else { aggregations.writeTo(out); } diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index b4a93ec9869e6..395ebaf2523e7 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.PointValues; import org.apache.lucene.index.Terms; import org.apache.lucene.search.SortField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -161,9 +160,7 @@ public FieldSortBuilder(StreamInput in) throws IOException { sortMode = in.readOptionalWriteable(SortMode::readFromStream); unmappedType = in.readOptionalString(); nestedSort = in.readOptionalWriteable(NestedSortBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - numericType = in.readOptionalString(); - } + numericType = in.readOptionalString(); } @Override @@ -176,9 +173,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(sortMode); out.writeOptionalString(unmappedType); out.writeOptionalWriteable(nestedSort); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(numericType); - } + out.writeOptionalString(numericType); } /** Returns the document field this sort should be based on. */ diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index d7ebba721a52c..38d9df0e960e0 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -73,7 +73,6 @@ public final class SnapshotInfo implements Comparable, ToXContent, public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; - public static final Version METADATA_FIELD_INTRODUCED = LegacyESVersion.V_7_3_0; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); private static final String SNAPSHOT = "snapshot"; private static final String UUID = "uuid"; @@ -401,11 +400,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { shardFailures = Collections.unmodifiableList(in.readList(SnapshotShardFailure::new)); version = in.readBoolean() ? 
Version.readVersion(in) : null; includeGlobalState = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + userMetadata = in.readMap(); if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { dataStreams = in.readStringList(); } else { @@ -840,11 +835,9 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } + out.writeMap(userMetadata); + if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { + out.writeStringCollection(dataStreams); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 5b2b4f361083b..ffd3a66ad1d48 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.node.DiscoveryNode; @@ -55,6 +54,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -78,7 +78,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { - public static class CancellableNodeRequest extends BaseNodeRequest { + public static class CancellableNodeRequest extends TransportRequest { protected String requestName; public CancellableNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index 5d947a743385f..d49dd14492327 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -16,7 +16,6 @@ import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -32,6 +31,7 @@ import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.test.tasks.MockTaskManagerListener; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import 
java.io.IOException; @@ -56,7 +56,7 @@ public class ResourceAwareTasksTests extends TaskManagerTestCase { private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - public static class ResourceAwareNodeRequest extends BaseNodeRequest { + public static class ResourceAwareNodeRequest extends TransportRequest { protected String requestName; public ResourceAwareNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 68cf69e30f8a6..8b0c2187d05af 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -37,7 +37,6 @@ import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -66,6 +65,7 @@ import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.junit.After; @@ -156,8 +156,8 @@ public int failureCount() { /** * Simulates node-based task that can be used to block node tasks so they are guaranteed to be registered by task manager */ - abstract class AbstractTestNodesAction, NodeRequest extends BaseNodeRequest> extends - TransportNodesAction { + abstract class AbstractTestNodesAction, NodeRequest extends TransportRequest> + extends TransportNodesAction { AbstractTestNodesAction( String actionName, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 3bb1957c69fb4..aa0e9511f86ce 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -41,7 +41,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -182,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected final String requestName; protected final boolean shouldBlock; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 7590bf88eeca0..97a045872477d 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -44,7 +44,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.tasks.BaseTasksRequest; import org.opensearch.action.support.tasks.BaseTasksResponse; @@ -67,6 +66,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -91,7 +91,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected String requestName; public NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java index 4a90a23cbd2f0..07246f144d95b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -82,11 +82,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) { assertEquals(request.indicesOptions(), indicesOptions); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); - } else { - assertEquals(0, in.available()); - } + assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); } } } @@ -100,9 +96,7 @@ public void testBwcSerialization() throws Exception { out.writeTimeValue(sample.timeout()); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - sample.waitForActiveShards().writeTo(out); - } + sample.waitForActiveShards().writeTo(out); final CloseIndexRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { @@ -119,11 +113,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || sample.indicesOptions().expandWildcardsHidden()) { assertEquals(sample.indicesOptions(), deserializedRequest.indicesOptions()); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); - } else { - assertEquals(ActiveShardCount.NONE, deserializedRequest.waitForActiveShards()); - } + assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); } } } diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 86657a98d9a1d..76142efc60b7d 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -50,6 +50,7 @@ import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -378,7 +379,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) thro } } - private static class TestNodeRequest extends BaseNodeRequest { + private static class TestNodeRequest extends TransportRequest { TestNodeRequest() {} TestNodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java index 089dfcaf65517..bf33d7a45f4fb 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java @@ -286,7 +286,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // write using older version which contains types ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setVersion(LegacyESVersion.V_7_2_0); + out.setVersion(LegacyESVersion.fromId(7000099)); request.writeTo(out); // First check the type on the stream was written as "_doc" by manually parsing the stream until the type @@ -302,7 +302,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // now read the stream as normal to check it is parsed correct if received from an older node opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); - opensearchBuffer.setVersion(LegacyESVersion.V_7_2_0); + opensearchBuffer.setVersion(LegacyESVersion.fromId(7000099)); TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer); assertThat(request.offsets(), equalTo(req2.offsets())); From 77cff55bd431c0e2f8fdea5a40c83f8e8bddeca1 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 16:29:39 -0500 Subject: [PATCH 08/39] [Remove] LegacyESVersion.V_7_4_* and V_7_5_* constants (#4704) Removes all usages of LegacyESVersion.V_7_4_* and LegacyESVersion.V_7_5_* version constants along with ancient API logic.
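For reviewers scanning the hunks that follow: nearly every change in this patch collapses the same wire-version guard around stream serialization. The before/after sketch below is condensed from RecoveryFinalizeRecoveryRequest in this patch (the class, fields, and stream calls are taken from the diff, but the snippet is trimmed for illustration and is not a verbatim excerpt):

    // Before: trimAboveSeqNo only travels the wire when both ends are on
    // 7.4.0 or later, so the reader needs a fallback for older senders.
    RecoveryFinalizeRecoveryRequest(StreamInput in) throws IOException {
        super(in);
        recoveryId = in.readLong();
        shardId = new ShardId(in);
        globalCheckpoint = in.readZLong();
        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
            trimAboveSeqNo = in.readZLong();
        } else {
            trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
        }
    }

    // After: wire compatibility with these ancient versions is gone, so the
    // guard and the fallback are deleted and the field is read unconditionally.
    RecoveryFinalizeRecoveryRequest(StreamInput in) throws IOException {
        super(in);
        recoveryId = in.readLong();
        shardId = new ShardId(in);
        globalCheckpoint = in.readZLong();
        trimAboveSeqNo = in.readZLong();
    }

The matching writeTo() change is symmetrical: out.writeZLong(trimAboveSeqNo) loses its onOrAfter(V_7_4_0) guard and is always emitted.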
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../org/opensearch/upgrades/IndexingIT.java | 13 +- .../DedicatedClusterSnapshotRestoreIT.java | 40 -- .../java/org/opensearch/LegacyESVersion.java | 6 - .../org/opensearch/OpenSearchException.java | 9 +- .../NodesReloadSecureSettingsRequest.java | 18 +- .../TransportCleanupRepositoryAction.java | 16 +- .../create/TransportCreateSnapshotAction.java | 14 +- .../snapshots/status/SnapshotStatus.java | 18 +- .../shards/IndicesShardStoresResponse.java | 15 +- .../admin/indices/shrink/ResizeRequest.java | 4 - .../opensearch/action/index/IndexRequest.java | 18 +- .../ingest/SimulateDocumentBaseResult.java | 29 +- .../cluster/RepositoryCleanupInProgress.java | 2 +- .../cluster/routing/UnassignedInfo.java | 11 +- .../org/opensearch/index/IndexSettings.java | 4 +- .../query/VectorGeoShapeQueryProcessor.java | 2 +- .../ScriptScoreQueryBuilder.java | 13 +- .../index/seqno/ReplicationTracker.java | 10 +- .../opensearch/index/shard/IndexShard.java | 3 +- .../RecoveryFinalizeRecoveryRequest.java | 12 +- ...ryPrepareForTranslogOperationsRequest.java | 7 - .../recovery/RecoverySourceHandler.java | 4 +- .../indices/recovery/RecoveryState.java | 9 +- .../TransportNodesListShardStoreMetadata.java | 10 +- .../repositories/FilterRepository.java | 6 - .../opensearch/repositories/Repository.java | 14 - .../blobstore/BlobStoreRepository.java | 16 - .../repositories/blobstore/package-info.java | 16 +- .../rest/action/document/RestIndexAction.java | 3 +- .../CompositeValuesSourceParserHelper.java | 10 - .../MovFnPipelineAggregationBuilder.java | 11 +- .../pipeline/MovFnPipelineAggregator.java | 11 +- .../snapshots/SnapshotsService.java | 368 +----------------- .../GeoShapeQueryBuilderGeoShapeTests.java | 20 +- .../RepositoriesServiceTests.java | 5 - .../index/shard/RestoreOnlyRepository.java | 4 - .../test/rest/OpenSearchRestTestCase.java | 4 +- 38 files changed, 64 insertions(+), 712 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ee30dc84e9fd..37e16aecff69a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) ### Fixed diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f34e5f7bc121a..888fa886c3c5e 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -181,18 +181,7 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } } - if (minNodeVersion.before(LegacyESVersion.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if 
request goes to < 7.5 node - .or(containsString("an id must be provided if version type or value are set") - )); - } else { - client().performRequest(bulk); - } + client().performRequest(bulk); break; case UPGRADED: client().performRequest(bulk); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b4287f201489b..c0fabb8becf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -37,7 +37,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -47,7 +46,6 @@ import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; @@ -1386,44 +1384,6 @@ public void testPartialSnapshotAllShardsMissing() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); } - /** - * Tests for the legacy snapshot path that is normally executed if the cluster contains any nodes older than - * {@link SnapshotsService#NO_REPO_INITIALIZE_VERSION}. - * Makes sure that blocking as well as non-blocking snapshot create paths execute cleanly as well as that error handling works out - * correctly by testing a snapshot name collision. 
- */ - public void testCreateSnapshotLegacyPath() throws Exception { - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - createRepository(repoName, "fs"); - createIndex("some-index"); - - final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); - final Snapshot snapshot1 = PlainActionFuture.get( - f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) - ); - awaitNoMoreRunningOperations(clusterManagerNode); - - final InvalidSnapshotNameException sne = expectThrows( - InvalidSnapshotNameException.class, - () -> PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, snapshot1.getSnapshotId().getName()), f) - ) - ); - - assertThat(sne.getMessage(), containsString("snapshot with the same name already exists")); - final SnapshotInfo snapshot2 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-2"), f) - ); - assertThat(snapshot2.state(), is(SnapshotState.SUCCESS)); - - final SnapshotInfo snapshot3 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-3").indices("does-not-exist-*"), f) - ); - assertThat(snapshot3.state(), is(SnapshotState.SUCCESS)); - } - public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { internalCluster().startClusterManagerOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 283a6581a64bb..6e2aeaa5f7b4f 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,12 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_5_0 = new LegacyESVersion(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_1 = new LegacyESVersion(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_2 = new LegacyESVersion(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index e9f0d831d2977..78bda1cf088cd 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -49,7 +49,6 @@ import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; 
-import org.opensearch.search.SearchException; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.transport.TcpTransport; @@ -317,10 +316,6 @@ public void writeTo(StreamOutput out) throws IOException { public static OpenSearchException readException(StreamInput input, int id) throws IOException { CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); if (opensearchException == null) { - if (id == 127 && input.getVersion().before(LegacyESVersion.V_7_5_0)) { - // was SearchContextException - return new SearchException(input); - } throw new IllegalStateException("unknown exception for id: " + id); } return opensearchException.apply(input); @@ -1569,13 +1564,13 @@ private enum OpenSearchExceptionHandle { org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), INGEST_PROCESSOR_EXCEPTION( org.opensearch.ingest.IngestProcessorException.class, org.opensearch.ingest.IngestProcessorException::new, 157, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.opensearch.indices.recovery.PeerRecoveryNotFound.class, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e31f5f304c836..b721c8f005974 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -114,16 +114,14 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index a3804db687a2d..07b918e427784 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -91,8 +89,6 @@ public final class 
TransportCleanupRepositoryAction extends TransportClusterMana private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); - private static final Version MIN_VERSION = LegacyESVersion.V_7_4_0; - private final RepositoriesService repositoriesService; private final SnapshotsService snapshotsService; @@ -179,17 +175,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) { - if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { - cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); - } else { - throw new IllegalArgumentException( - "Repository cleanup is only supported from version [" - + MIN_VERSION - + "] but the oldest node version in the cluster is [" - + state.nodes().getMinNodeVersion() - + ']' - ); - } + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index ed4af6d915792..f604a30121797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -103,18 +103,10 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshotLegacy(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshotLegacy(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 8fd1ed22a0d14..5fa908a039887 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; @@ -92,15 +91,8 @@ public class SnapshotStatus implements ToXContentObject, Writeable { state = State.fromValue(in.readByte()); shards = Collections.unmodifiableList(in.readList(SnapshotIndexShardStatus::new)); includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } + final long startTime = in.readLong(); + final long time = in.readLong(); updateShardStats(startTime, time); } @@ -207,10 +199,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value()); out.writeList(shards); out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeLong(stats.getStartTime()); - out.writeLong(stats.getTime()); - } + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index bd5d9c651af7a..484bc93496fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; @@ -247,13 +246,8 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { } private Failure(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } readFrom(in, this); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } + nodeId = in.readString(); } public String nodeId() { @@ -266,13 +260,8 @@ static Failure readFailure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } + out.writeString(nodeId); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..f5d9528422b58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -122,9 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); - if (type == ResizeType.CLONE && out.getVersion().before(LegacyESVersion.V_7_4_0)) { - throw new IllegalArgumentException("can't send clone request to a node that's older than " + LegacyESVersion.V_7_4_0); - } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index 381eca2dc716f..ceff8dcbc4b55 
100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -153,12 +153,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - isPipelineResolved = in.readBoolean(); - } + finalPipeline = in.readOptionalString(); + isPipelineResolved = in.readBoolean(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -639,7 +635,7 @@ public void resolveRouting(Metadata metadata) { } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.V_7_5_0)) { + if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.fromId(7050099))) { throw new IllegalArgumentException( "optype create not supported for indexing requests without explicit id until all nodes " + "are on version 7.5.0 or higher" ); @@ -671,12 +667,8 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeOptionalString(finalPipeline); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeBoolean(isPipelineResolved); - } + out.writeOptionalString(finalPipeline); + out.writeBoolean(isPipelineResolved); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java index 2440a1802912b..f36ca0e7d7379 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java @@ -31,7 +31,6 @@ package org.opensearch.action.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -94,34 +93,14 @@ public SimulateDocumentBaseResult(Exception failure) { * Read from a stream. 
*/ public SimulateDocumentBaseResult(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - failure = in.readException(); - ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); - } else { - if (in.readBoolean()) { - ingestDocument = null; - failure = in.readException(); - } else { - ingestDocument = new WriteableIngestDocument(in); - failure = null; - } - } + failure = in.readException(); + ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeException(failure); - out.writeOptionalWriteable(ingestDocument); - } else { - if (failure == null) { - out.writeBoolean(false); - ingestDocument.writeTo(out); - } else { - out.writeBoolean(true); - out.writeException(failure); - } - } + out.writeException(failure); + out.writeOptionalWriteable(ingestDocument); } public IngestDocument getIngestDocument() { diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 2cf9d66fee2bd..291aa88a3fb3e 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -114,7 +114,7 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return LegacyESVersion.V_7_4_0; + return LegacyESVersion.fromId(7040099); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 489c6125f7d13..ed5a23cd09e40 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -314,11 +313,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failure = in.readException(); this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } else { - this.failedNodeIds = Collections.emptySet(); - } + this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public void writeTo(StreamOutput out) throws IOException { @@ -330,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(failure); out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeCollection(failedNodeIds, StreamOutput::writeString); - } + out.writeCollection(failedNodeIds, StreamOutput::writeString); } /** diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9c7f4804755d4..00daea147f16f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -34,7 +34,6 @@ import 
org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; @@ -1122,8 +1121,7 @@ public int getTranslogRetentionTotalFiles() { } private static boolean shouldDisableTranslogRetention(Settings settings) { - return INDEX_SOFT_DELETES_SETTING.get(settings) - && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(LegacyESVersion.V_7_4_0); + return INDEX_SOFT_DELETES_SETTING.get(settings); } /** diff --git a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java index e11b22e9296cf..d50585ae0aebf 100644 --- a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java @@ -67,7 +67,7 @@ public class VectorGeoShapeQueryProcessor { public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0) - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.fromId(7050099))) { throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); } // wrap geoQuery as a ConstantScoreQuery diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 9605ba424bfb0..ef606ce35b84f 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.index.query.functionscore; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -123,22 +122,14 @@ public ScriptScoreQueryBuilder(QueryBuilder query, Script script) { public ScriptScoreQueryBuilder(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script = new Script(in); - } else { - script = in.readNamedWriteable(ScriptScoreFunctionBuilder.class).getScript(); - } + script = new Script(in); minScore = in.readOptionalFloat(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(query); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script.writeTo(out); - } else { - out.writeNamedWriteable(new ScriptScoreFunctionBuilder(script)); - } + script.writeTo(out); out.writeOptionalFloat(minScore); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 63995ae121abb..ea1604c16190b 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -223,7 +223,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L /** * Whether there should be a peer recovery retention lease (PRRL) for every tracked shard copy. Always true on indices created from - * {@link LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an + * {@code LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an * earlier version if we recently did a rolling upgrade and * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} has not yet completed. Is only permitted * to change from false to true; can be removed once support for pre-PRRL indices is no longer needed. @@ -996,9 +996,7 @@ public ReplicationTracker( this.routingTable = null; this.replicationGroup = null; this.hasAllPeerRecoveryRetentionLeases = indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0) - || (indexSettings.isSoftDeleteEnabled() - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_4_0) - && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); + || (indexSettings.isSoftDeleteEnabled() && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; @@ -1126,7 +1124,7 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { /** * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@link LegacyESVersion#V_7_4_0}, in which case the missing leases should be created + * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. */ private void addPeerRecoveryRetentionLeaseForSolePrimary() { @@ -1528,7 +1526,7 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { /** * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@link LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. 
*/ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d05f7c34f80ce..52ecc5bc66607 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -52,7 +52,6 @@ import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -3187,7 +3186,7 @@ public RetentionLease addPeerRecoveryRetentionLease( ) { assert assertPrimaryMode(); // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs: - assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_4_0) || indexSettings.isSoftDeleteEnabled() == false; + assert indexSettings.isSoftDeleteEnabled() == false; return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index a7334fba15664..446fb78958db4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import java.io.IOException; @@ -57,11 +55,7 @@ final class RecoveryFinalizeRecoveryRequest extends RecoveryTransportRequest { recoveryId = in.readLong(); shardId = new ShardId(in); globalCheckpoint = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - trimAboveSeqNo = in.readZLong(); - } else { - trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + trimAboveSeqNo = in.readZLong(); } RecoveryFinalizeRecoveryRequest( @@ -100,9 +94,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeZLong(globalCheckpoint); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeZLong(trimAboveSeqNo); - } + out.writeZLong(trimAboveSeqNo); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index bdacb0b724884..68979fa4b69bc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.shard.ShardId; @@ -62,9 +61,6 @@ class RecoveryPrepareForTranslogOperationsRequest 
extends RecoveryTransportReque recoveryId = in.readLong(); shardId = new ShardId(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - in.readBoolean(); // was fileBasedRecovery - } } public long recoveryId() { @@ -85,8 +81,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeBoolean(true); // was fileBasedRecovery - } } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 665e79722770e..505d3c7adfb3f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,6 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -720,8 +719,7 @@ void createRetentionLease(final long startingSeqNo, ActionListener addRetentionLeaseStep = new StepListener<>(); final long estimatedGlobalCheckpoint = startingSeqNo - 1; final RetentionLease newLease = shard.addPeerRecoveryRetentionLease( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 57208ab029bf4..de2ee1b8512b4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -430,9 +429,7 @@ public Translog(StreamInput in) throws IOException { recovered = in.readVInt(); total = in.readVInt(); totalOnStart = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - totalLocal = in.readVInt(); - } + totalLocal = in.readVInt(); } @Override @@ -441,9 +438,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(recovered); out.writeVInt(total); out.writeVInt(totalOnStart); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeVInt(totalLocal); - } + out.writeVInt(totalLocal); } public synchronized void reset() { diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index 0e452fcbbc7a9..6189c983e3c8a 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -253,20 +253,14 @@ public StoreFilesMetadata( public StoreFilesMetadata(StreamInput in) throws IOException { this.shardId = new ShardId(in); this.metadataSnapshot = new Store.MetadataSnapshot(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } else { - this.peerRecoveryRetentionLeases = Collections.emptyList(); - 
} + this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); } @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); metadataSnapshot.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeList(peerRecoveryRetentionLeases); - } + out.writeList(peerRecoveryRetentionLeases); } public ShardId shardId() { diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index aaa021a0e8b93..a6a649fa2cd44 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -52,7 +52,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -95,11 +94,6 @@ public void getRepositoryData(ActionListener listener) { in.getRepositoryData(listener); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - in.initializeSnapshot(snapshotId, indices, metadata); - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index a16e0e8d441bc..1826fe1aa51da 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -129,19 +128,6 @@ default Repository create(RepositoryMetadata metadata, Function listener); - /** - * Starts snapshotting process - * - * @param snapshotId snapshot id - * @param indices list of indices to be snapshotted - * @param metadata cluster metadata - * - * @deprecated this method is only used when taking snapshots in a mixed version cluster where a cluster-manager node older than - * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION} is present. - */ - @Deprecated - void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata); - /** * Finalizes snapshotting process *

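With initializeSnapshot removed from the Repository interface, delegating repository wrappers no longer need to forward it. A minimal sketch of that effect, assuming a hypothetical PassThroughRepository (the class name is invented for illustration):

    import org.opensearch.repositories.FilterRepository;
    import org.opensearch.repositories.Repository;

    // Hypothetical wrapper, not part of this patch: it now compiles with no
    // initializeSnapshot override, because FilterRepository delegates every
    // remaining Repository method to the wrapped instance.
    public class PassThroughRepository extends FilterRepository {
        public PassThroughRepository(Repository in) {
            super(in);
        }
    }
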
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c36d92abcf498..bf06191bdc8d3 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.opensearch.repositories.RepositoryVerificationException; import org.opensearch.repositories.ShardGenerations; import org.opensearch.snapshots.AbortedSnapshotException; -import org.opensearch.snapshots.SnapshotCreationException; import org.opensearch.snapshots.SnapshotException; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -713,21 +712,6 @@ public RepositoryStats stats() { return new RepositoryStats(store.stats()); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata clusterMetadata) { - try { - // Write Global Metadata - GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress); - - // write the index metadata for each index in the snapshot - for (IndexId index : indices) { - INDEX_METADATA_FORMAT.write(clusterMetadata.index(index.getName()), indexContainer(index), snapshotId.getUUID(), compress); - } - } catch (IOException ex) { - throw new SnapshotCreationException(metadata.name(), snapshotId, ex); - } - } - @Override public void deleteSnapshots( Collection snapshotIds, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java index aacd386cd4bd7..b13a63ef77f6a 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java @@ -163,23 +163,9 @@ * *

  * <p>Creating a snapshot in the repository happens in the three steps described in detail below.</p>
  *
 - * <h3>Initializing a Snapshot in the Repository (Mixed Version Clusters only)</h3>
 - *
 - * <p>In mixed version clusters that contain a node older than
 - * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a
 - * call to {@link org.opensearch.repositories.Repository#initializeSnapshot} which the blob store repository implements via the
 - * following actions:</p>
 - * <ol>
 - *     <li>Verify that no snapshot by the requested name exists.</li>
 - *     <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
 - *     <li>Write the metadata for each index to a blob in that index's directory at
 - *     {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
 - * </ol>
 - *
 - * TODO: Remove this section once BwC logic it references is removed
 - *
   * <h3>Writing Shard Data (Segments)</h3>
   *
 - * <p>Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data
 + * <p>The snapshot process writes the actual shard data
   * to the repository by invoking {@link org.opensearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries
   * for the shards in the current snapshot. It is implemented as follows:</p>

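The updated javadoc reflects that snapshot creation is now always driven through the cluster-state flow in SnapshotsService#createSnapshot. A usage sketch from the client side, assuming an available node Client and placeholder repository, snapshot, and index names:

    import org.opensearch.action.ActionListener;
    import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
    import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
    import org.opensearch.client.Client;

    public final class SnapshotSketch {
        // "my-repo", "snap-1" and "my-index" are placeholders, not values
        // taken from this patch.
        static void createSnapshot(Client client) {
            CreateSnapshotRequest request = new CreateSnapshotRequest("my-repo", "snap-1")
                .indices("my-index")
                .waitForCompletion(true);
            client.admin().cluster().createSnapshot(
                request,
                ActionListener.wrap(
                    (CreateSnapshotResponse response) -> System.out.println(
                        "snapshot state: " + response.getSnapshotInfo().state()
                    ),
                    e -> e.printStackTrace()
                )
            );
        }
    }
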
* diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java index e7b1da91aba8f..bd2c11cf71ff1 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.document; -import org.opensearch.LegacyESVersion; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.ActiveShardCount; @@ -128,7 +127,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException { assert request.params().get("id") == null : "non-null id: " + request.params().get("id"); - if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { + if (request.params().get("op_type") == null) { // default to op_type create request.params().put("op_type", "create"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index d8526e684f391..6ca64c2186cb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.io.stream.StreamInput; @@ -103,15 +102,6 @@ public static void writeTo(CompositeValuesSourceBuilder builder, StreamOutput aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass()); if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass()); - if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) { - throw new IOException( - "Attempting to serialize [" - + builder.getClass().getSimpleName() - + "] to a node with unsupported version [" - + out.getVersion() - + "]" - ); - } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 33c09e04bd4b0..501f1af63b3d9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -114,11 +113,7 @@ public MovFnPipelineAggregationBuilder(StreamInput in) throws IOException { format = in.readOptionalString(); gapPolicy = GapPolicy.readFrom(in); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -128,9 +123,7 @@ protected 
void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(format); gapPolicy.writeTo(out); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index 70652e7ddce44..7b20a796b8134 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.Script; @@ -106,11 +105,7 @@ public MovFnPipelineAggregator(StreamInput in) throws IOException { gapPolicy = BucketHelpers.GapPolicy.readFrom(in); bucketsPath = in.readString(); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -120,9 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { gapPolicy.writeTo(out); out.writeString(bucketsPath); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 4f672c9813d64..e53c2889f88e6 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -90,7 +90,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.repositories.IndexId; @@ -142,12 +141,6 @@ */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { - /** - * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, Metadata)} to write snapshot metadata - * when starting a snapshot. - */ - public static final Version NO_REPO_INITIALIZE_VERSION = LegacyESVersion.V_7_5_0; - public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0; public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0; @@ -156,7 +149,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0; - public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.V_7_5_0; + public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099); public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0; @@ -244,144 +237,6 @@ public SnapshotsService( } } - /** - * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of - * the snapshot. 
- * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot completion listener - */ - public void executeSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - createSnapshotLegacy( - request, - ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure) - ); - } - - /** - * Initializes the snapshotting process. - *

- * This method is used by clients to start snapshot. It makes sure that there is no snapshots are currently running and - * creates a snapshot record in cluster state metadata. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot creation listener - */ - public void createSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - final String repositoryName = request.repository(); - final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); - validate(repositoryName, snapshotName); - final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot - Repository repository = repositoriesService.repository(request.repository()); - final Map userMeta = repository.adaptUserMetadata(request.userMetadata()); - clusterService.submitStateUpdateTask("create_snapshot [" + snapshotName + ']', new ClusterStateUpdateTask() { - - private List indices; - - private SnapshotsInProgress.Entry newEntry; - - @Override - public ClusterState execute(ClusterState currentState) { - validate(repositoryName, snapshotName, currentState); - SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]" - ); - } - final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); - if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.hasCleanupInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" - ); - } - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from - // the cluster state anyway in #applyClusterState. 
- if (snapshots != null - && snapshots.entries() - .stream() - .anyMatch( - entry -> (entry.state() == State.INIT && initializingSnapshots.contains(entry.snapshot()) == false) == false - )) { - throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); - } - // Store newSnapshot here to be processed in clusterStateProcessed - indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request)); - - final List dataStreams = indexNameExpressionResolver.dataStreamNames( - currentState, - request.indicesOptions(), - request.indices() - ); - - logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); - newEntry = new SnapshotsInProgress.Entry( - new Snapshot(repositoryName, snapshotId), - request.includeGlobalState(), - request.partial(), - State.INIT, - Collections.emptyList(), // We'll resolve the list of indices when moving to the STARTED state in #beginSnapshot - dataStreams, - threadPool.absoluteTimeInMillis(), - RepositoryData.UNKNOWN_REPO_GEN, - ImmutableOpenMap.of(), - userMeta, - Version.CURRENT - ); - initializingSnapshots.add(newEntry.snapshot()); - snapshots = SnapshotsInProgress.of(Collections.singletonList(newEntry)); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); - if (newEntry != null) { - initializingSnapshots.remove(newEntry.snapshot()); - } - newEntry = null; - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { - if (newEntry != null) { - final Snapshot current = newEntry.snapshot(); - assert initializingSnapshots.contains(current); - assert indices != null; - beginSnapshot(newState, newEntry, request.partial(), indices, repository, new ActionListener() { - @Override - public void onResponse(final Snapshot snapshot) { - initializingSnapshots.remove(snapshot); - listener.onResponse(snapshot); - } - - @Override - public void onFailure(final Exception e) { - initializingSnapshots.remove(current); - listener.onFailure(e); - } - }); - } - } - - @Override - public TimeValue timeout() { - return request.clusterManagerNodeTimeout(); - } - }); - } - /** * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of * the snapshot. @@ -946,227 +801,6 @@ private static void validate(final String repositoryName, final String snapshotN } } - /** - * Starts snapshot. - *

- * Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param clusterState cluster state - * @param snapshot snapshot meta data - * @param partial allow partial snapshots - * @param userCreateSnapshotListener listener - */ - private void beginSnapshot( - final ClusterState clusterState, - final SnapshotsInProgress.Entry snapshot, - final boolean partial, - final List indices, - final Repository repository, - final ActionListener userCreateSnapshotListener - ) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { - - boolean hadAbortedInitializations; - - @Override - protected void doRun() { - assert initializingSnapshots.contains(snapshot.snapshot()); - if (repository.isReadOnly()) { - throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository"); - } - final String snapshotName = snapshot.snapshot().getSnapshotId().getName(); - final StepListener repositoryDataListener = new StepListener<>(); - repository.getRepositoryData(repositoryDataListener); - repositoryDataListener.whenComplete(repositoryData -> { - // check if the snapshot name already exists in the repository - if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { - throw new InvalidSnapshotNameException( - repository.getMetadata().name(), - snapshotName, - "snapshot with the same name already exists" - ); - } - if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { - // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager - // failover to an - // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a - // valid - // snapshot. 
- repository.initializeSnapshot( - snapshot.snapshot().getSnapshotId(), - snapshot.indices(), - metadataForSnapshot(snapshot, clusterState.metadata()) - ); - } - - logger.info("snapshot [{}] started", snapshot.snapshot()); - final Version version = minCompatibleVersion(clusterState.nodes().getMinNodeVersion(), repositoryData, null); - if (indices.isEmpty()) { - // No indices in this snapshot - we are done - userCreateSnapshotListener.onResponse(snapshot.snapshot()); - endSnapshot( - SnapshotsInProgress.startedEntry( - snapshot.snapshot(), - snapshot.includeGlobalState(), - snapshot.partial(), - Collections.emptyList(), - Collections.emptyList(), - threadPool.absoluteTimeInMillis(), - repositoryData.getGenId(), - ImmutableOpenMap.of(), - snapshot.userMetadata(), - version - ), - clusterState.metadata(), - repositoryData - ); - return; - } - clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - - @Override - public ClusterState execute(ClusterState currentState) { - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - List entries = new ArrayList<>(); - for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - if (entry.snapshot().equals(snapshot.snapshot()) == false) { - entries.add(entry); - continue; - } - - if (entry.state() == State.ABORTED) { - entries.add(entry); - assert entry.shards().isEmpty(); - hadAbortedInitializations = true; - } else { - final List indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap()); - // Replace the snapshot that was just initialized - ImmutableOpenMap shards = shards( - snapshots, - currentState.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY), - currentState.metadata(), - currentState.routingTable(), - indexIds, - useShardGenerations(version), - repositoryData, - entry.repository() - ); - if (!partial) { - Tuple, Set> indicesWithMissingShards = indicesWithMissingShards( - shards, - currentState.metadata() - ); - Set missing = indicesWithMissingShards.v1(); - Set closed = indicesWithMissingShards.v2(); - if (missing.isEmpty() == false || closed.isEmpty() == false) { - final StringBuilder failureMessage = new StringBuilder(); - if (missing.isEmpty() == false) { - failureMessage.append("Indices don't have primary shards "); - failureMessage.append(missing); - } - if (closed.isEmpty() == false) { - if (failureMessage.length() > 0) { - failureMessage.append("; "); - } - failureMessage.append("Indices are closed "); - failureMessage.append(closed); - } - entries.add( - new SnapshotsInProgress.Entry( - entry, - State.FAILED, - indexIds, - repositoryData.getGenId(), - shards, - version, - failureMessage.toString() - ) - ); - continue; - } - } - entries.add( - new SnapshotsInProgress.Entry( - entry, - State.STARTED, - indexIds, - repositoryData.getGenId(), - shards, - version, - null - ) - ); - } - } - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries))) - .build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn( - () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), - e - ); - removeFailedSnapshotFromClusterState( - snapshot.snapshot(), - e, - null, - new CleanupAfterErrorListener(userCreateSnapshotListener, e) - ); - } - - @Override - public void onNoLongerClusterManager(String source) { - // We are not longer a cluster-manager - we shouldn't try to 
do any cleanup - // The new cluster-manager will take care of it - logger.warn( - "[{}] failed to create snapshot - no longer a cluster-manager", - snapshot.snapshot().getSnapshotId() - ); - userCreateSnapshotListener.onFailure( - new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization") - ); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted - // for processing. If client wants to wait for the snapshot completion, it can register snapshot - // completion listener in this method. For the snapshot completion to work properly, the snapshot - // should still exist when listener is registered. - userCreateSnapshotListener.onResponse(snapshot.snapshot()); - - if (hadAbortedInitializations) { - final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE); - assert snapshotsInProgress != null; - final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); - assert entry != null; - endSnapshot(entry, newState.metadata(), repositoryData); - } else { - endCompletedSnapshots(newState); - } - } - }); - }, this::onFailure); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); - removeFailedSnapshotFromClusterState( - snapshot.snapshot(), - e, - null, - new CleanupAfterErrorListener(userCreateSnapshotListener, e) - ); - } - }); - } - private static class CleanupAfterErrorListener { private final ActionListener userCreateSnapshotListener; diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java index f588767d5336d..f8f512e5aefc6 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java @@ -31,7 +31,6 @@ package org.opensearch.index.query; -import org.opensearch.LegacyESVersion; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.test.geo.RandomShapeGenerator; @@ -73,21 +72,12 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { } } if (randomBoolean()) { - QueryShardContext context = createShardContext(); - if (context.indexVersionCreated().onOrAfter(LegacyESVersion.V_7_5_0)) { // CONTAINS is only supported from version 7.5 - if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); - } else { - builder.relation( - randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS) - ); - } + if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { + builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); } else { - if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS)); - } else { - 
builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN)); - } + builder.relation( + randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS) + ); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 6a8999a205be2..da44643de98a5 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -246,11 +246,6 @@ public void getRepositoryData(ActionListener listener) { listener.onResponse(null); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index 5ed85fedc8cea..2a85fffa8699a 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -55,7 +55,6 @@ import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -116,9 +115,6 @@ public void getRepositoryData(ActionListener listener) { ); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) {} - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index f2b68b6fdaca0..bbf7763551bcd 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -531,8 +531,8 @@ protected boolean waitForAllSnapshotsWiped() { private void wipeCluster() throws Exception { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping - if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced - // in version 7.4 + if (nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced + // in version 7.4 if (preserveSLMPoliciesUponCompletion() == false) { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping deleteAllSLMPolicies(); From e207dfcfa1daeac509971bc5ed94f297137592a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Oct 2022 13:55:00 -0700 Subject: [PATCH 09/39] Bump protobuf-java from 3.21.2 to 3.21.7 in /test/fixtures/hdfs-fixture (#4727) * Bump protobuf-java from 3.21.2 to 3.21.7 in /test/fixtures/hdfs-fixture Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.21.2 to 3.21.7. 
- [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.21.2...v3.21.7) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37e16aecff69a..34dcef6077b85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Dependencies - Bumps `gson` from 2.9.0 to 2.9.1 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 ### Added @@ -162,4 +163,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 2e541572b7385..d01db6376db42 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -46,5 +46,5 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" - api "com.google.protobuf:protobuf-java:3.21.2" + api "com.google.protobuf:protobuf-java:3.21.7" } From 9f415ebe87c1e26cde4f59da8fc93764c0ffa27d Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 10 Oct 2022 22:00:37 -0500 Subject: [PATCH 10/39] [Refactor] Base Action class javadocs to OpenSearch.API (#4732) Refactors base action classes implemented by external plugins through ActionPlugin from opensearch.internal to opensearch.api to signal extensibility to third-party developers. 
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 3 ++- server/src/main/java/org/opensearch/action/ActionFuture.java | 2 +- .../src/main/java/org/opensearch/action/ActionListener.java | 2 +- .../org/opensearch/action/ActionListenerResponseHandler.java | 2 +- server/src/main/java/org/opensearch/action/ActionRequest.java | 4 ++-- .../main/java/org/opensearch/action/ActionRequestBuilder.java | 2 +- .../opensearch/action/ActionRequestValidationException.java | 4 ++-- .../src/main/java/org/opensearch/action/ActionResponse.java | 4 ++-- .../src/main/java/org/opensearch/action/ActionRunnable.java | 2 +- server/src/main/java/org/opensearch/action/ActionType.java | 2 +- 10 files changed, 14 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34dcef6077b85..75fe1e844fa74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) +- Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) ### Deprecated @@ -163,4 +164,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/server/src/main/java/org/opensearch/action/ActionFuture.java b/server/src/main/java/org/opensearch/action/ActionFuture.java index 77b748f50bfbf..d796180eda021 100644 --- a/server/src/main/java/org/opensearch/action/ActionFuture.java +++ b/server/src/main/java/org/opensearch/action/ActionFuture.java @@ -40,7 +40,7 @@ /** * An extension to {@link Future} allowing for simplified "get" operations. * - * @opensearch.internal + * @opensearch.api */ public interface ActionFuture extends Future { diff --git a/server/src/main/java/org/opensearch/action/ActionListener.java b/server/src/main/java/org/opensearch/action/ActionListener.java index 8f632449c7d91..645ed4deec006 100644 --- a/server/src/main/java/org/opensearch/action/ActionListener.java +++ b/server/src/main/java/org/opensearch/action/ActionListener.java @@ -46,7 +46,7 @@ /** * A listener for action responses or failures. * - * @opensearch.internal + * @opensearch.api */ public interface ActionListener { /** diff --git a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java index 26e9ba8621c53..af8fde4c9893c 100644 --- a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java @@ -46,7 +46,7 @@ * A simple base class for action response listeners, defaulting to using the SAME executor (as its * very common on response handlers). 
* - * @opensearch.internal + * @opensearch.api */ public class ActionListenerResponseHandler implements TransportResponseHandler { diff --git a/server/src/main/java/org/opensearch/action/ActionRequest.java b/server/src/main/java/org/opensearch/action/ActionRequest.java index c6d8eb9f273d6..a6879dd98691a 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequest.java +++ b/server/src/main/java/org/opensearch/action/ActionRequest.java @@ -39,9 +39,9 @@ import java.io.IOException; /** - * Base action request + * Base action request implemented by plugins. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRequest extends TransportRequest { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java index d1fddb076b350..27358a0412468 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java @@ -40,7 +40,7 @@ /** * Base Action Request Builder * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java index d7da932c4dfc2..ffba4d2eb50c0 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java @@ -35,8 +35,8 @@ import org.opensearch.common.ValidationException; /** - * Base exception for an action request validation + * Base exception for an action request validation extendable by plugins * - * @opensearch.internal + * @opensearch.api */ public class ActionRequestValidationException extends ValidationException {} diff --git a/server/src/main/java/org/opensearch/action/ActionResponse.java b/server/src/main/java/org/opensearch/action/ActionResponse.java index c72fc87ccfaf8..ab0544365c0b9 100644 --- a/server/src/main/java/org/opensearch/action/ActionResponse.java +++ b/server/src/main/java/org/opensearch/action/ActionResponse.java @@ -38,9 +38,9 @@ import java.io.IOException; /** - * Base class for responses to action requests. + * Base class for responses to action requests implemented by plugins. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionResponse extends TransportResponse { diff --git a/server/src/main/java/org/opensearch/action/ActionRunnable.java b/server/src/main/java/org/opensearch/action/ActionRunnable.java index c718b33bd404a..2c3f70afda75d 100644 --- a/server/src/main/java/org/opensearch/action/ActionRunnable.java +++ b/server/src/main/java/org/opensearch/action/ActionRunnable.java @@ -41,7 +41,7 @@ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught * exception or error is thrown while the actual action is run. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRunnable extends AbstractRunnable { diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java index 9c17061990abe..c22cddd6fad71 100644 --- a/server/src/main/java/org/opensearch/action/ActionType.java +++ b/server/src/main/java/org/opensearch/action/ActionType.java @@ -39,7 +39,7 @@ /** * A generic action. Should strive to make it a singleton. 
* - * @opensearch.internal + * @opensearch.api */ public class ActionType { From 21d89b28839d26068dceb162fb92a4188ed0b67c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Oct 2022 07:07:50 -0700 Subject: [PATCH 11/39] Bump azure-core from 1.31.0 to 1.33.0 in /plugins/repository-azure (#4726) * Bump azure-core from 1.31.0 to 1.33.0 in /plugins/repository-azure Bumps [azure-core](https://github.com/Azure/azure-sdk-for-java) from 1.31.0 to 1.33.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core_1.31.0...azure-core_1.33.0) --- updated-dependencies: - dependency-name: com.azure:azure-core dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 | 1 - plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 75fe1e844fa74..d37e38b38fdf9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Dependencies - Bumps `gson` from 2.9.0 to 2.9.1 - Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `azure-core` from 1.31.0 to 1.33.0 ### Added diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 8ca151d1e90db..6be815e4a0f46 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.31.0' + api 'com.azure:azure-core:1.33.0' api 'com.azure:azure-storage-common:12.18.1' api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 deleted file mode 100644 index 6a5076b3da301..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39f18dae02237f90f1cd23b56701d7f9d9525531 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 new file mode 100644 index 0000000000000..9077fc4ebf84b --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 @@ -0,0 +1 @@ +93f105c2e923f0ab90521cc0e6e729b9c8304ad8 \ No newline at end of file From cd8f0fadd0cea5035a325a647c161e17ff5d7384 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Oct 2022 08:46:57 -0700 Subject: [PATCH 12/39] Bump avro from 1.11.0 to 1.11.1 in /plugins/repository-hdfs (#4076) * Bump avro from 1.11.0 to 1.11.1 in /plugins/repository-hdfs Bumps avro from 1.11.0 to 1.11.1. 
--- updated-dependencies: - dependency-name: org.apache.avro:avro dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d37e38b38fdf9..273c7d2eb088c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `gson` from 2.9.0 to 2.9.1 - Bumps `protobuf-java` from 3.21.2 to 3.21.7 - Bumps `azure-core` from 1.31.0 to 1.33.0 +- Bumps `avro` from 1.11.0 to 1.11.1 ### Added @@ -165,4 +166,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6fd91f78a63e6..3cdac4c2d2560 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -65,7 +65,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.0' + api 'org.apache.avro:avro:1.11.1' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' diff --git a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 deleted file mode 100644 index 9a0601879a1fc..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b0c58e5b450d4f4931456952ad9520cae9c896c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 new file mode 100644 index 0000000000000..f03424516b44e --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 @@ -0,0 +1 @@ +81af5d4b9bdaaf4ba41bcb0df5241355ec34c630 \ No newline at end of file From c1272c181e6bd96d1db725dfbbcd5ac8e6058502 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 11 Oct 2022 10:20:34 -0700 Subject: [PATCH 13/39] Add a new node role 'search' which is dedicated to provide search capability (#4689) Signed-off-by: Tianli Feng --- CHANGELOG.md | 1 + .../src/main/java/org/opensearch/client/Node.java | 7 +++++++ .../java/org/opensearch/client/NodeTests.java | 7 +++++++ .../admin/cluster/stats/ClusterStatsIT.java | 2 ++ .../opensearch/cluster/node/DiscoveryNode.java | 9 +++++++++ .../cluster/node/DiscoveryNodeRole.java | 15 ++++++++++++++- .../cluster/node/DiscoveryNodeTests.java | 7 +++++++ 7 files changed, 47 insertions(+), 1 deletion(-) diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 273c7d2eb088c..73cf43a765cdc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) +- Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index c02ac6c68718f..2fa6605d57ad2 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -239,6 +239,13 @@ public boolean isIngest() { return roles.contains("ingest"); } + /** + * Returns whether the node is dedicated to provide search capability. + */ + public boolean isSearch() { + return roles.contains("search"); + } + @Override public String toString() { return String.join(",", roles); diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 352296fa3024a..296c4a1f09122 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -48,7 +48,9 @@ import static java.util.Collections.singletonMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.equalTo; public class NodeTests extends RestClientTestCase { public void testToString() { @@ -161,4 +163,9 @@ public void testEqualsAndHashCode() { ) ); } + + public void testIsSearchNode() { + Roles searchRole = new Roles(Collections.singleton("search")); + assertThat(searchRole.isSearch(), equalTo(true)); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index a44cf05a4bdc4..11d1af608fbee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -89,6 +89,7 @@ public void testNodeCounts() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 1); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); int numNodes = randomIntBetween(1, 5); @@ -160,6 +161,7 @@ public void testNodeCountsWithDeprecatedMasterRole() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 0); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 0); + 
expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 016c0aac44be6..ec72ea8b49829 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -460,6 +460,15 @@ public boolean isRemoteClusterClient() { return roles.contains(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); } + /** + * Returns whether the node is dedicated to provide search capability. + * + * @return true if the node contains search role, false otherwise + */ + public boolean isSearchNode() { + return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. *

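The same role is also exposed through the low-level REST client (see the Node.Roles change above). A short sketch, with an illustrative hand-built role set that would normally come from sniffing the cluster:

    import java.util.Collections;
    import org.opensearch.client.Node;

    public final class SearchRoleSketch {
        public static void main(String[] args) {
            // a node advertising only the new dedicated "search" role
            Node.Roles roles = new Node.Roles(Collections.singleton("search"));
            System.out.println(roles.isSearch()); // true
            System.out.println(roles.isIngest()); // false
        }
    }
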
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 5685667c05b1a..bfc44378632d8 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -290,11 +290,24 @@ public Setting legacySetting() { }; + /** + * Represents the role for a search node, which is dedicated to provide search capability. + */ + public static final DiscoveryNodeRole SEARCH_ROLE = new DiscoveryNodeRole("search", "s", true) { + + @Override + public Setting legacySetting() { + // search role is added in 2.4 so doesn't need to configure legacy setting + return null; + } + + }; + /** * The built-in node roles. */ public static SortedSet BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, SEARCH_ROLE)) ); /** diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 70a64fc60bdb4..8c30a8ff19c89 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; +import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; @@ -204,4 +205,10 @@ public void testGetRoleFromRoleNameIsCaseInsensitive() { assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleName()); assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleNameAbbreviation()); } + + public void testDiscoveryNodeIsSearchNode() { + final Settings settingWithSearchRole = NodeRoles.onlyRole(DiscoveryNodeRole.SEARCH_ROLE); + final DiscoveryNode node = DiscoveryNode.createLocal(settingWithSearchRole, buildNewFakeTransportAddress(), "node"); + assertThat(node.isSearchNode(), equalTo(true)); + } } From 4833e080cc506237fee5aa90767d6f49d952bec3 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 11 Oct 2022 16:59:43 -0400 Subject: [PATCH 14/39] Migrate client transports to Apache HttpClient / Core 5.x (#4459) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../resources/forbidden/http-signatures.txt | 27 +- buildSrc/version.properties | 2 + .../benchmark/rest/RestClientBenchmark.java | 8 +- client/rest-high-level/build.gradle | 6 + .../client/ClusterRequestConverters.java | 8 +- .../client/IndicesRequestConverters.java | 10 +- .../client/IngestRequestConverters.java | 8 +- .../opensearch/client/RequestConverters.java | 26 +- .../client/RestHighLevelClient.java | 6 +- .../client/SnapshotRequestConverters.java | 8 +- .../client/TasksRequestConverters.java | 4 +- .../forbidden/rest-high-level-signatures.txt | 11 +- .../opensearch/client/ClusterClientIT.java | 7 +- .../client/ClusterRequestConvertersTests.java | 4 +- .../CustomRestHighLevelClientTests.java | 23 +- .../HighLevelRestClientCompressionIT.java | 6 +- .../opensearch/client/IndicesClientIT.java | 6 +- .../client/IndicesRequestConvertersTests.java | 10 +- .../client/IngestRequestConvertersTests.java | 8 +- 
 .../client/MockRestHighLevelTests.java | 15 +-
 ...OpenSearchRestHighLevelClientTestCase.java | 6 +-
 .../org/opensearch/client/PingAndInfoIT.java | 2 +-
 .../java/org/opensearch/client/PitIT.java | 4 +-
 .../client/RequestConvertersTests.java | 44 +-
 .../client/RestHighLevelClientExtTests.java | 10 +-
 .../client/RestHighLevelClientTests.java | 112 ++--
 .../java/org/opensearch/client/SearchIT.java | 4 +-
 .../SnapshotRequestConvertersTests.java | 8 +-
 .../client/TasksRequestConvertersTests.java | 4 +-
 .../documentation/CRUDDocumentationIT.java | 2 +-
 .../MiscellaneousDocumentationIT.java | 6 +-
 client/rest/build.gradle | 15 +-
 .../licenses/httpasyncclient-4.1.5.jar.sha1 | 1 -
 .../rest/licenses/httpasyncclient-LICENSE.txt | 182 ------
 .../rest/licenses/httpasyncclient-NOTICE.txt | 5 -
 .../rest/licenses/httpclient-4.5.13.jar.sha1 | 1 -
 .../rest/licenses/httpclient5-5.1.3.jar.sha1 | 1 +
 ...nt-LICENSE.txt => httpclient5-LICENSE.txt} | 0
 ...ient-NOTICE.txt => httpclient5-NOTICE.txt} | 2 +-
 client/rest/licenses/httpcore-4.4.15.jar.sha1 | 1 -
 client/rest/licenses/httpcore-LICENSE.txt | 178 ------
 .../licenses/httpcore-nio-4.4.15.jar.sha1 | 1 -
 client/rest/licenses/httpcore-nio-LICENSE.txt | 202 -------
 client/rest/licenses/httpcore5-5.1.4.jar.sha1 | 1 +
 client/rest/licenses/httpcore5-LICENSE.txt | 558 ++++++++++++++++++
 ...re-nio-NOTICE.txt => httpcore5-NOTICE.txt} | 6 +-
 .../rest/licenses/httpcore5-h2-5.1.4.jar.sha1 | 1 +
 client/rest/licenses/httpcore5-h2-LICENSE.txt | 558 ++++++++++++++++++
 ...ore-NOTICE.txt => httpcore5-h2-NOTICE.txt} | 5 +-
 .../rest/licenses/slf4j-api-1.7.36.jar.sha1 | 1 +
 client/rest/licenses/slf4j-api-LICENSE.txt | 21 +
 client/rest/licenses/slf4j-api-NOTICE.txt | 0
 .../org/opensearch/client/Cancellable.java | 28 +-
 .../client/HasAttributeNodeSelector.java | 14 +
 .../HeapBufferedAsyncResponseConsumer.java | 125 ----
 .../HttpAsyncResponseConsumerFactory.java | 20 +-
 .../client/HttpDeleteWithEntity.java | 54 --
 .../opensearch/client/HttpGetWithEntity.java | 54 --
 .../main/java/org/opensearch/client/Node.java | 22 +-
 ...tentCredentialsAuthenticationStrategy.java | 77 ---
 .../PreferHasAttributeNodeSelector.java | 14 +
 .../java/org/opensearch/client/Request.java | 18 +-
 .../org/opensearch/client/RequestLogger.java | 76 ++-
 .../org/opensearch/client/RequestOptions.java | 24 +-
 .../java/org/opensearch/client/Response.java | 28 +-
 .../opensearch/client/ResponseException.java | 13 +-
 .../org/opensearch/client/RestClient.java | 302 +++++++---
 .../opensearch/client/RestClientBuilder.java | 58 +-
 .../client/http/HttpUriRequestProducer.java | 63 ++
 .../opensearch/client/http/package-info.java | 12 +
 .../nio/HeapBufferedAsyncEntityConsumer.java | 139 +++++
 .../HeapBufferedAsyncResponseConsumer.java | 123 ++++
 .../nio/HttpEntityAsyncEntityProducer.java | 182 ++++++
 .../opensearch/client/nio/package-info.java | 12 +
 .../FailureTrackingResponseListenerTests.java | 18 +-
 .../client/HasAttributeNodeSelectorTests.java | 2 +-
 ...eapBufferedAsyncResponseConsumerTests.java | 93 ++-
 .../client/HostsTrackingFailureListener.java | 2 +-
 .../opensearch/client/NodeSelectorTests.java | 2 +-
 .../java/org/opensearch/client/NodeTests.java | 2 +-
 .../PreferHasAttributeNodeSelectorTests.java | 2 +-
 .../opensearch/client/RequestLoggerTests.java | 69 +--
 .../client/RequestOptionsTests.java | 13 +-
 .../org/opensearch/client/RequestTests.java | 19 +-
 .../client/ResponseExceptionTests.java | 31 +-
 .../client/RestClientBuilderIntegTests.java | 5 +-
 .../client/RestClientBuilderTests.java | 13 +-
 .../client/RestClientCompressionTests.java | 11 +-
 .../RestClientGzipCompressionTests.java | 13 +-
 .../RestClientMultipleHostsIntegTests.java | 8 +-
 .../client/RestClientMultipleHostsTests.java | 7 +-
 .../RestClientSingleHostIntegTests.java | 98 +--
 .../client/RestClientSingleHostTests.java | 243 ++++----
 .../opensearch/client/RestClientTests.java | 17 +-
 .../RestClientDocumentation.java | 126 ++--
 client/sniffer/build.gradle | 5 +-
 .../licenses/httpclient-4.5.13.jar.sha1 | 1 -
 .../licenses/httpclient5-5.1.3.jar.sha1 | 1 +
 .../sniffer/licenses/httpcore-4.4.15.jar.sha1 | 1 -
 .../sniffer/licenses/httpcore5-5.1.4.jar.sha1 | 1 +
 .../client/sniff/OpenSearchNodesSniffer.java | 8 +-
 .../client/sniff/MockNodesSniffer.java | 2 +-
 .../OpenSearchNodesSnifferParseTests.java | 9 +-
 .../sniff/OpenSearchNodesSnifferTests.java | 12 +-
 .../sniff/SniffOnFailureListenerTests.java | 2 +-
 .../client/sniff/SnifferBuilderTests.java | 3 +-
 .../opensearch/client/sniff/SnifferTests.java | 2 +-
 .../documentation/SnifferDocumentation.java | 12 +-
 client/test/build.gradle | 2 +-
 .../opensearch/client/RestClientTestCase.java | 3 +-
 .../opensearch/client/RestClientTestUtil.java | 5 +-
 .../test/rest/WaitForRefreshAndCloseIT.java | 5 +-
 .../OpenSearchDashboardsSystemIndexIT.java | 19 +-
 .../AbstractAsyncBulkByScrollAction.java | 3 +-
 .../index/reindex/ReindexSslConfig.java | 26 +-
 .../opensearch/index/reindex/Reindexer.java | 50 +-
 .../reindex/remote/RemoteRequestBuilders.java | 8 +-
 .../remote/RemoteScrollableHitSource.java | 17 +-
 .../spi/ReindexRestInterceptorProvider.java | 3 +-
 .../remote/RemoteRequestBuildersTests.java | 16 +-
 .../RemoteScrollableHitSourceTests.java | 191 +++---
 .../RepositoryURLClientYamlTestSuiteIT.java | 8 +-
 .../rest/discovery/Zen2RestApiIT.java | 5 +-
 .../azure/AzureBlobContainerRetriesTests.java | 4 +-
 ...CloudStorageBlobContainerRetriesTests.java | 2 +-
 ...rossClusterSearchUnavailableClusterIT.java | 8 +-
 .../upgrades/FullClusterRestartIT.java | 23 +-
 .../upgrades/QueryBuilderBWCIT.java | 5 +-
 .../org/opensearch/backwards/ExceptionIT.java | 10 +-
 .../org/opensearch/backwards/IndexingIT.java | 7 +-
 .../AbstractMultiClusterRemoteTestCase.java | 6 +-
 .../org/opensearch/upgrades/IndexingIT.java | 7 +-
 .../org/opensearch/upgrades/RecoveryIT.java | 2 +-
 .../http/DetailedErrorsDisabledIT.java | 5 +-
 .../http/DetailedErrorsEnabledIT.java | 5 +-
 .../opensearch/http/HttpCompressionIT.java | 9 +-
 .../java/org/opensearch/http/NoHandlerIT.java | 7 +-
 .../http/RestHttpResponseHeadersIT.java | 2 +-
 .../http/SearchRestCancellationIT.java | 8 +-
 .../RestHighLevelClientProducer.java | 6 +-
 .../org/opensearch/wildfly/WildflyIT.java | 21 +-
 .../bootstrap/test-framework.policy | 6 +
 .../client/RestClientBuilderTestCase.java | 3 +-
 .../AbstractBlobContainerRetriesTestCase.java | 4 +-
 ...chMockAPIBasedRepositoryIntegTestCase.java | 3 +-
 .../test/OpenSearchIntegTestCase.java | 5 +-
 .../test/rest/OpenSearchRestTestCase.java | 47 +-
 .../rest/yaml/ClientYamlDocsTestClient.java | 4 +-
 .../test/rest/yaml/ClientYamlTestClient.java | 34 +-
 .../yaml/ClientYamlTestExecutionContext.java | 6 +-
 .../rest/yaml/ClientYamlTestResponse.java | 6 +-
 .../opensearch/test/rest/yaml/ObjectPath.java | 2 +-
 .../OpenSearchClientYamlSuiteTestCase.java | 3 +-
 .../ClientYamlTestExecutionContextTests.java | 2 +-
 .../rest/yaml/section/DoSectionTests.java | 2 +-
 156 files changed, 3142 insertions(+), 1940 deletions(-)
 delete mode 100644 client/rest/licenses/httpasyncclient-4.1.5.jar.sha1
 delete mode 100644 client/rest/licenses/httpasyncclient-LICENSE.txt
 delete mode 100644 client/rest/licenses/httpasyncclient-NOTICE.txt
 delete mode 100644 client/rest/licenses/httpclient-4.5.13.jar.sha1
 create mode 100644 client/rest/licenses/httpclient5-5.1.3.jar.sha1
 rename client/rest/licenses/{httpclient-LICENSE.txt => httpclient5-LICENSE.txt} (100%)
 rename client/rest/licenses/{httpclient-NOTICE.txt => httpclient5-NOTICE.txt} (72%)
 delete mode 100644 client/rest/licenses/httpcore-4.4.15.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore-LICENSE.txt
 delete mode 100644 client/rest/licenses/httpcore-nio-4.4.15.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore-nio-LICENSE.txt
 create mode 100644 client/rest/licenses/httpcore5-5.1.4.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-LICENSE.txt
 rename client/rest/licenses/{httpcore-nio-NOTICE.txt => httpcore5-NOTICE.txt} (56%)
 create mode 100644 client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-h2-LICENSE.txt
 rename client/rest/licenses/{httpcore-NOTICE.txt => httpcore5-h2-NOTICE.txt} (55%)
 create mode 100644 client/rest/licenses/slf4j-api-1.7.36.jar.sha1
 create mode 100644 client/rest/licenses/slf4j-api-LICENSE.txt
 create mode 100644 client/rest/licenses/slf4j-api-NOTICE.txt
 delete mode 100644 client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java
 delete mode 100644 client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java
 delete mode 100644 client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java
 delete mode 100644 client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/http/package-info.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/package-info.java
 delete mode 100644 client/sniffer/licenses/httpclient-4.5.13.jar.sha1
 create mode 100644 client/sniffer/licenses/httpclient5-5.1.3.jar.sha1
 delete mode 100644 client/sniffer/licenses/httpcore-4.4.15.jar.sha1
 create mode 100644 client/sniffer/licenses/httpcore5-5.1.4.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 73cf43a765cdc..e321efc39a2bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -83,6 +83,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590))
 - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677))
 - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732))
+- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459))

 ### Deprecated
diff --git a/buildSrc/src/main/resources/forbidden/http-signatures.txt b/buildSrc/src/main/resources/forbidden/http-signatures.txt
index dcf20bbb09387..bfd81b3521a40 100644
---
a/buildSrc/src/main/resources/forbidden/http-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/http-signatures.txt @@ -15,31 +15,14 @@ # language governing permissions and limitations under the License. @defaultMessage Explicitly specify the ContentType of HTTP entities when creating -org.apache.http.entity.StringEntity#(java.lang.String) -org.apache.http.entity.StringEntity#(java.lang.String,java.lang.String) -org.apache.http.entity.StringEntity#(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ByteArrayEntity#(byte[]) -org.apache.http.entity.ByteArrayEntity#(byte[],int,int) -org.apache.http.entity.FileEntity#(java.io.File) -org.apache.http.entity.InputStreamEntity#(java.io.InputStream) -org.apache.http.entity.InputStreamEntity#(java.io.InputStream,long) -org.apache.http.nio.entity.NByteArrayEntity#(byte[]) -org.apache.http.nio.entity.NByteArrayEntity#(byte[],int,int) -org.apache.http.nio.entity.NFileEntity#(java.io.File) -org.apache.http.nio.entity.NStringEntity#(java.lang.String) -org.apache.http.nio.entity.NStringEntity#(java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#(java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#(java.lang.String,java.nio.charset.Charset) @defaultMessage Use non-deprecated constructors -org.apache.http.nio.entity.NFileEntity#(java.io.File,java.lang.String) -org.apache.http.nio.entity.NFileEntity#(java.io.File,java.lang.String,boolean) -org.apache.http.entity.FileEntity#(java.io.File,java.lang.String) -org.apache.http.entity.StringEntity#(java.lang.String,java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.FileEntity#(java.io.File,org.apache.hc.core5.http.ContentType) @defaultMessage BasicEntity is easy to mess up and forget to set content type -org.apache.http.entity.BasicHttpEntity#() - -@defaultMessage EntityTemplate is easy to mess up and forget to set content type -org.apache.http.entity.EntityTemplate#(org.apache.http.entity.ContentProducer) +org.apache.hc.core5.http.io.entity.BasicHttpEntity#(java.io.InputStream,org.apache.hc.core5.http.ContentType) @defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type -org.apache.http.entity.SerializableEntity#(java.io.Serializable) +org.apache.hc.core5.http.io.entity.SerializableEntity#(java.io.Serializable,org.apache.hc.core5.http.ContentType) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index bf72245c63918..a779389b3ca82 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -25,6 +25,8 @@ netty = 4.1.79.Final joda = 2.10.13 # client dependencies +httpclient5 = 5.1.3 +httpcore5 = 5.1.4 httpclient = 4.5.13 httpcore = 4.4.15 httpasyncclient = 4.1.5 diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java index d2d7163b8dee2..e8dcff814603d 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java @@ -31,10 +31,10 @@ package org.opensearch.client.benchmark.rest; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpHost; -import org.apache.http.HttpStatus; -import org.apache.http.message.BasicHeader; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpHost; +import 
org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.message.BasicHeader; import org.opensearch.OpenSearchException; import org.opensearch.client.Request; import org.opensearch.client.Response; diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 07147ce81b72e..7fa2855d85487 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -104,3 +104,9 @@ testClusters.all { extraConfigFile nodeTrustStore.name, nodeTrustStore extraConfigFile pkiTrustCert.name, pkiTrustCert } + +thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory' +) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java index 37a1ab8812845..4ff8e75b521b6 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index 3a5384f23b90e..ca9154340a660 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java index 2504dec3af36e..4c044413642ac 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java @@ -32,10 +32,10 @@ package 
org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 91c339cc92c1b..88e3a3a904830 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -32,14 +32,14 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -269,7 +269,7 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { } } request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); + request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); return request; } @@ -358,7 +358,7 @@ static Request index(IndexRequest indexRequest) { BytesRef source = indexRequest.source().toBytesRef(); ContentType contentType = createContentType(indexRequest.getContentType()); request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); + request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType)); return request; } @@ -514,7 +514,7 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); request.addParameters(params.asMap()); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -549,7 +549,7 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = 
MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -817,7 +817,7 @@ static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); + return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } static String endpoint(String index, String id) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 0a5880b778942..27f13fc3c00c4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; @@ -2220,9 +2220,9 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(entity.getContentType()); if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); + throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType()); } try (XContentParser parser = xContentType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { return entityParser.apply(parser); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java index 3d44820966608..263d7db82ba08 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import 
org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java index ff89950f37cb9..78a74ca04ff9b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.RequestConverters.EndpointBuilder; import org.opensearch.client.tasks.CancelTasksRequest; diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 68dc509e5ff27..0d7749b39fb91 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -15,10 +15,9 @@ # language governing permissions and limitations under the License. @defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type -org.apache.http.entity.ContentType#create(java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +org.apache.hc.core5.http.ContentType#create(java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.nio.charset.Charset) @defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users org.opensearch.common.logging.DeprecationLogger @@ -30,7 +29,3 @@ org.opensearch.common.logging.PrefixLogger @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! 
org.opensearch.common.xcontent.LoggingDeprecationHandler - -@defaultMessage Use Nonblocking org.apache.http.nio.entity.NByteArrayEntity -org.apache.http.entity.ByteArrayEntity -org.apache.http.entity.StringEntity diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 71b869fb59e7b..82d2cbe9149ca 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -32,7 +32,8 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -220,7 +221,7 @@ public void testClusterHealthGreen() throws IOException { assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } - public void testClusterHealthYellowClusterLevel() throws IOException { + public void testClusterHealthYellowClusterLevel() throws IOException, ParseException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); ClusterHealthRequest request = new ClusterHealthRequest(); @@ -231,7 +232,7 @@ public void testClusterHealthYellowClusterLevel() throws IOException { assertThat(response.getIndices().size(), equalTo(0)); } - public void testClusterHealthYellowIndicesLevel() throws IOException { + public void testClusterHealthYellowIndicesLevel() throws IOException, ParseException { String firstIndex = "index"; String secondIndex = "index2"; // including another index that we do not assert on, to ensure that we are not diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index 27adc18fd37b8..f201599632969 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,6 +42,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.CoreMatchers; import org.junit.Assert; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java index 1d94f190c611c..972c96999945f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java @@ -32,15 +32,14 @@ package org.opensearch.client; -import org.apache.http.Header; -import 
org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.apache.lucene.util.BytesRef; import org.opensearch.Build; import org.opensearch.Version; @@ -172,13 +171,13 @@ private Response mockPerformRequest(Request request) throws IOException { when(mockResponse.getHost()).thenReturn(new HttpHost("localhost", 9200)); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT); BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef(); - when(mockResponse.getEntity()).thenReturn(new NByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); + when(mockResponse.getEntity()).thenReturn(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); return mockResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java index c0c03ed1d0e7c..054d0ae8670b5 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java @@ -31,9 +31,9 @@ package org.opensearch.client; -import org.apache.http.HttpHeaders; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpHeaders; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index f9c8851f8839e..750b0c15e9c14 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import 
org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.indices.alias.Alias; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index fdb5f2843b44d..512cc058a64a7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.alias.Alias; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index 200069ade1ea2..8aae33307279b 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -32,10 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; @@ -44,6 +40,10 @@ import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Assert; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java index bd57c5c9e53f6..e1179c0f24cb8 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java @@ -32,13 +32,12 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; import 
org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.junit.Before; import java.io.IOException; @@ -64,9 +63,9 @@ private void setupClient() throws IOException { when(mockResponse.getWarnings()).thenReturn(WARNINGS); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); WarningFailureException expectedException = new WarningFailureException(mockResponse); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java index efac508cf6814..a8c73393f54ce 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -61,6 +61,8 @@ import org.opensearch.search.SearchModule; import org.opensearch.tasks.TaskId; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.AfterClass; import org.junit.Before; @@ -324,7 +326,7 @@ protected static void setupRemoteClusterConfig(String remoteClusterName) throws }); } - protected static Map toMap(Response response) throws IOException { + protected static Map toMap(Response response) throws IOException, OpenSearchParseException, ParseException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java index 09ef90cef144d..6f66a5279afa3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpGet; import org.opensearch.client.core.MainResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index be9b614a8720f..1f10deb400ecc 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -8,8 +8,8 @@ 
package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Before; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index ee5795deb165d..576fe02718ba3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -32,14 +32,6 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.util.EntityUtils; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; @@ -120,6 +112,14 @@ import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matchers; import java.io.IOException; @@ -733,8 +733,8 @@ public void testIndex() throws IOException { assertEquals(method, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); - assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType().getValue()); + assertTrue(entity instanceof ByteArrayEntity); + assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType()); try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) { assertEquals(nbFields, parser.map().size()); } @@ -805,11 +805,11 @@ public void testUpdate() throws IOException { assertEquals(HttpPost.METHOD_NAME, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); + assertTrue(entity instanceof ByteArrayEntity); UpdateRequest parsedUpdateRequest = new UpdateRequest(); - XContentType entityContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(entity.getContentType()); try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { parsedUpdateRequest.fromXContent(parser); } @@ -926,7 +926,7 @@ public void testBulk() throws IOException { assertEquals("/_bulk", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPost.METHOD_NAME, 
request.getMethod()); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); byte[] content = new byte[(int) request.getEntity().getContentLength()]; try (InputStream inputStream = request.getEntity().getContent()) { Streams.readFully(inputStream, content); @@ -979,7 +979,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -989,7 +989,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -1001,7 +1001,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { } Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest)); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { BulkRequest bulkRequest = new BulkRequest(); @@ -1289,7 +1289,7 @@ public void testSearchScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(searchScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testClearScroll() throws IOException { @@ -1303,7 +1303,7 @@ public void testClearScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(clearScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testCreatePit() throws IOException { @@ -1324,7 +1324,7 @@ public void testCreatePit() throws IOException { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(createPitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeletePit() throws IOException { @@ -1337,7 +1337,7 @@ public void testDeletePit() throws IOException { assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals(endpoint, 
request.getEndpoint()); assertToXContentBody(deletePitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeleteAllPits() { @@ -1456,7 +1456,7 @@ public void testMultiSearchTemplate() throws Exception { HttpEntity actualEntity = multiRequest.getEntity(); byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); } @@ -1763,7 +1763,7 @@ public void testDeleteScriptRequest() { static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java index dbdf7eba3dca4..5743820ff0175 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry; @@ -64,14 +64,14 @@ public void initClient() { public void testParseEntityCustomResponseSection() throws IOException { { - HttpEntity jsonEntity = new NStringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection1.class)); CustomResponseSection1 customResponseSection1 = (CustomResponseSection1) customSection; assertEquals("value", customResponseSection1.value); } { - HttpEntity jsonEntity = new NStringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection2.class)); CustomResponseSection2 
customResponseSection2 = (CustomResponseSection2) customSection; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 1b9c7d4aead12..9086fc9563c57 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -33,19 +33,6 @@ package org.opensearch.client; import com.fasterxml.jackson.core.JsonParseException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; @@ -87,6 +74,17 @@ import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.hamcrest.Matchers; import org.junit.Before; @@ -123,7 +121,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { private static final String SUBMIT_TASK_PREFIX = "submit_"; private static final String SUBMIT_TASK_SUFFIX = "_task"; private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1); - private static final RequestLine REQUEST_LINE = new BasicRequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); + private static final RequestLine REQUEST_LINE = new RequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); /** * These APIs do not use a Request object (because they don't have a body, or any request parameters). 
@@ -258,7 +256,7 @@ private void mockResponse(ToXContent toXContent) throws IOException { Response response = mock(Response.class); ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType()); String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString(); - when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType)); + when(response.getEntity()).thenReturn(new StringEntity(requestBody, contentType)); when(restClient.performRequest(any(Request.class))).thenReturn(response); } @@ -308,14 +306,14 @@ public void testParseEntity() throws IOException { { IllegalStateException ise = expectThrows( IllegalStateException.class, - () -> restHighLevelClient.parseEntity(new NStringEntity("", (ContentType) null), null) + () -> restHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null) ); assertEquals("OpenSearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage()); } { - NStringEntity entity = new NStringEntity("", ContentType.APPLICATION_SVG_XML); + StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(entity, null)); - assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage()); + assertEquals("Unsupported Content-Type: " + entity.getContentType(), ise.getMessage()); } { CheckedFunction entityParser = parser -> { @@ -326,9 +324,9 @@ public void testParseEntity() throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); return value; }; - HttpEntity jsonEntity = new NStringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); assertEquals("value", restHighLevelClient.parseEntity(jsonEntity, entityParser)); - HttpEntity yamlEntity = new NStringEntity("---\nfield: value\n", ContentType.create("application/yaml")); + HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml")); assertEquals("value", restHighLevelClient.parseEntity(yamlEntity, entityParser)); HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile")); assertEquals("value", restHighLevelClient.parseEntity(smileEntity, entityParser)); @@ -342,13 +340,13 @@ private static HttpEntity createBinaryEntity(XContentBuilder xContentBuilder, Co builder.startObject(); builder.field("field", "value"); builder.endObject(); - return new NByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); + return new ByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); } } public void testConvertExistsResponse() { RestStatus restStatus = randomBoolean() ? 
RestStatus.OK : randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); boolean result = RestHighLevelClient.convertExistsResponse(response); assertEquals(restStatus == RestStatus.OK, result); @@ -357,7 +355,7 @@ public void testConvertExistsResponse() { public void testParseResponseException() throws IOException { { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -367,9 +365,9 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity( + new StringEntity( "{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON ) @@ -383,8 +381,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -395,8 +393,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -411,7 +409,7 @@ public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse); { @@ -437,7 +435,7 @@ public void testPerformRequestOnSuccess() throws IOException { ); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -451,7 +449,7 @@ public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOExcept MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -474,9 +472,9 @@ public void testPerformRequestOnResponseExceptionWithEntity() throws IOException MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -500,8 +498,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOExc MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -525,8 +523,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -549,7 +547,7 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -569,7 +567,7 @@ public void testPerformRequestOnResponseExceptionWithIgnores() throws IOExceptio public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -591,8 +589,8 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -620,7 +618,7 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + 
ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse)); assertNull(trackingActionListener.exception.get()); assertEquals(restStatus.getStatus(), trackingActionListener.statusCode.get()); @@ -633,13 +631,13 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse)); assertThat(trackingActionListener.exception.get(), instanceOf(IOException.class)); IOException ioe = (IOException) trackingActionListener.exception.get(); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -670,7 +668,7 @@ public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IO Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -689,9 +687,9 @@ public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOExc Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); @@ -712,8 +710,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -732,8 +730,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = 
new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -753,7 +751,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOEx trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -771,7 +769,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorNoBody() trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -791,8 +789,8 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorValidBody trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -1162,6 +1160,6 @@ public void onFailure(Exception e) { } private static StatusLine newStatusLine(RestStatus restStatus) { - return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); + return new StatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 8b509e5d19e92..cc6f08217d057 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import 
org.opensearch.action.explain.ExplainRequest; @@ -101,6 +99,8 @@ import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.Matchers; import org.junit.Before; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java index 10baaa2e53dd4..e86de6ba718f9 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java index 64fec3c8fb810..a777bbc5d1868 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.tasks.CancelTasksRequest; import org.opensearch.tasks.TaskId; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java index 959c5a827f143..c63b311feebc7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java @@ -32,7 +32,7 @@ package org.opensearch.client.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java index 0213441a0b6a7..3edf639da8867 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java @@ -32,7 +32,7 @@ package org.opensearch.client.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestClient; @@ -92,8 +92,8 @@ public void testInitializationFromClientBuilder() throws IOException { //tag::rest-high-level-client-init RestHighLevelClient client = new RestHighLevelClient( RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http"))); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201))); //end::rest-high-level-client-init //tag::rest-high-level-client-close diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 01c186ed83fc2..178359948f32e 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -40,12 +40,12 @@ group = 'org.opensearch.client' archivesBaseName = 'opensearch-rest-client' dependencies { - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" - api "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" - api "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" + api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" + api "org.slf4j:slf4j-api:${versions.slf4j}" testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" @@ -54,6 +54,9 @@ dependencies { testImplementation "org.mockito:mockito-core:${versions.mockito}" testImplementation "org.objenesis:objenesis:${versions.objenesis}" testImplementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" + testImplementation "org.apache.logging.log4j:log4j-api:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" } tasks.withType(CheckForbiddenApis).configureEach { @@ -85,6 +88,10 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 deleted file mode 100644 index 366a9e31069a6..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-LICENSE.txt b/client/rest/licenses/httpasyncclient-LICENSE.txt deleted file mode 100644 index 2c41ec88f61cf..0000000000000 --- a/client/rest/licenses/httpasyncclient-LICENSE.txt +++ /dev/null @@ -1,182 +0,0 @@ - Apache License - Version 2.0, 
January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) - diff --git a/client/rest/licenses/httpasyncclient-NOTICE.txt b/client/rest/licenses/httpasyncclient-NOTICE.txt deleted file mode 100644 index b45be98d168a4..0000000000000 --- a/client/rest/licenses/httpasyncclient-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache HttpComponents AsyncClient -Copyright 2010-2016 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
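The license and notice churn in this part of the patch follows directly from the dependency swap in client/rest/build.gradle above: the 4.x artifacts (httpclient, httpcore, httpcore-nio, httpasyncclient) are removed in favor of httpclient5 and httpcore5, so their per-artifact license files are deleted and re-added under the new names. One behavioral change from the documentation tests above is also worth isolating: the HttpHost constructor moves the scheme from the last argument to the first. A minimal sketch, assuming httpcore5 5.1.x (HttpHostMigrationSketch and clusterHosts are hypothetical, for illustration only):

// Sketch of the HttpHost argument reordering, assuming httpcore5 5.1.x.
// HttpHostMigrationSketch and clusterHosts are hypothetical.
import org.apache.hc.core5.http.HttpHost;

public class HttpHostMigrationSketch {

    static HttpHost[] clusterHosts() {
        // 4.x: new HttpHost("localhost", 9200, "http")  -- scheme last
        // 5.x: the scheme moves to the first position.
        return new HttpHost[] {
            new HttpHost("http", "localhost", 9200),
            new HttpHost("http", "localhost", 9201),
        };
    }
}

Since core5's HttpHost has no (hostname, port, scheme) constructor, call sites left in the old argument order fail to compile rather than silently misbehave, which makes this particular change straightforward to audit.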
diff --git a/client/rest/licenses/httpclient-4.5.13.jar.sha1 b/client/rest/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/rest/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.1.3.jar.sha1 b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-LICENSE.txt b/client/rest/licenses/httpclient5-LICENSE.txt similarity index 100% rename from client/rest/licenses/httpclient-LICENSE.txt rename to client/rest/licenses/httpclient5-LICENSE.txt diff --git a/client/rest/licenses/httpclient-NOTICE.txt b/client/rest/licenses/httpclient5-NOTICE.txt similarity index 72% rename from client/rest/licenses/httpclient-NOTICE.txt rename to client/rest/licenses/httpclient5-NOTICE.txt index 91e5c40c4c6d3..afee7c6e6880b 100644 --- a/client/rest/licenses/httpclient-NOTICE.txt +++ b/client/rest/licenses/httpclient5-NOTICE.txt @@ -1,5 +1,5 @@ Apache HttpComponents Client -Copyright 1999-2016 The Apache Software Foundation +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/rest/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-LICENSE.txt b/client/rest/licenses/httpcore-LICENSE.txt deleted file mode 100644 index e454a52586f29..0000000000000 --- a/client/rest/licenses/httpcore-LICENSE.txt +++ /dev/null @@ -1,178 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 deleted file mode 100644 index 251b35ab6a1a5..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-LICENSE.txt b/client/rest/licenses/httpcore-nio-LICENSE.txt deleted file mode 100644 index d645695673349..0000000000000 --- a/client/rest/licenses/httpcore-nio-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/client/rest/licenses/httpcore5-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ +92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-LICENSE.txt b/client/rest/licenses/httpcore5-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-nio-NOTICE.txt b/client/rest/licenses/httpcore5-NOTICE.txt similarity index 56% rename from client/rest/licenses/httpcore-nio-NOTICE.txt rename to client/rest/licenses/httpcore5-NOTICE.txt index a2e17bb60009f..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-nio-NOTICE.txt +++ b/client/rest/licenses/httpcore5-NOTICE.txt @@ -1,8 +1,6 @@ - -Apache HttpCore NIO -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - diff --git a/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..2369ee9dfb7e1 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 @@ -0,0 +1 @@ +04de79e0bb34d65c86e4d163ae2f45d53746b70d \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-LICENSE.txt b/client/rest/licenses/httpcore5-h2-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-NOTICE.txt b/client/rest/licenses/httpcore5-h2-NOTICE.txt similarity index 55% rename from client/rest/licenses/httpcore-NOTICE.txt rename to client/rest/licenses/httpcore5-h2-NOTICE.txt index 013448d3e9561..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-NOTICE.txt +++ b/client/rest/licenses/httpcore5-h2-NOTICE.txt @@ -1,5 +1,6 @@ -Apache HttpComponents Core -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/client/rest/licenses/slf4j-api-LICENSE.txt b/client/rest/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..8fda22f4d72f6 --- /dev/null +++ b/client/rest/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/client/rest/licenses/slf4j-api-NOTICE.txt b/client/rest/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 4bfc0704227aa..56e31a3742f35 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -31,24 +31,26 @@ package org.opensearch.client; -import org.apache.http.client.methods.AbstractExecutionAwareRequest; -import org.apache.http.client.methods.HttpRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.concurrent.CancellableDependency; import java.util.concurrent.CancellationException; /** * Represents an operation that can be cancelled. * Returned when executing async requests through {@link RestClient#performRequestAsync(Request, ResponseListener)}, so that the request - * can be cancelled if needed. Cancelling a request will result in calling {@link AbstractExecutionAwareRequest#abort()} on the underlying + * can be cancelled if needed. Cancelling a request will result in calling {@link CancellableDependency#cancel()} on the underlying * request object, which will in turn cancel its corresponding {@link java.util.concurrent.Future}. * Note that cancelling a request does not automatically translate to aborting its execution on the server side, which needs to be * specifically implemented in each API. */ -public class Cancellable { +public class Cancellable implements org.apache.hc.core5.concurrent.Cancellable { static final Cancellable NO_OP = new Cancellable(null) { @Override - public void cancel() {} + public boolean cancel() { + throw new UnsupportedOperationException(); + } @Override void runIfNotCancelled(Runnable runnable) { @@ -56,13 +58,13 @@ void runIfNotCancelled(Runnable runnable) { } }; - static Cancellable fromRequest(HttpRequestBase httpRequest) { + static Cancellable fromRequest(CancellableDependency httpRequest) { return new Cancellable(httpRequest); } - private final HttpRequestBase httpRequest; + private final CancellableDependency httpRequest; - private Cancellable(HttpRequestBase httpRequest) { + private Cancellable(CancellableDependency httpRequest) { this.httpRequest = httpRequest; } @@ -70,15 +72,15 @@ private Cancellable(HttpRequestBase httpRequest) { * Cancels the on-going request that is associated with the current instance of {@link Cancellable}. * */ - public synchronized void cancel() { - this.httpRequest.abort(); + public synchronized boolean cancel() { + return this.httpRequest.cancel(); } /** * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different - * attempts of the same request. The low-level client reuses the same instance of the {@link AbstractExecutionAwareRequest} by calling - * {@link AbstractExecutionAwareRequest#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * attempts of the same request. 
The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and * the subsequent attempt has not been started yet. * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the @@ -87,7 +89,7 @@ public synchronized void cancel() { * when there is no future to cancel, which would make cancelling the request a no-op. */ synchronized void runIfNotCancelled(Runnable runnable) { - if (this.httpRequest.isAborted()) { + if (this.httpRequest.isCancelled()) { throw newCancellationException(); } runnable.run(); diff --git a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java index e6005c207ec93..0a54dbaf30364 100644 --- a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java @@ -57,6 +57,10 @@ public HasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. + * @param nodes the {@link Node}s targeted for the sending requests + */ @Override public void select(Iterable nodes) { Iterator itr = nodes.iterator(); @@ -70,6 +74,10 @@ public void select(Iterable nodes) { } } + /** + * Compare two node selectors for equality + * @param o node selector instance to compare with + */ @Override public boolean equals(Object o) { if (this == o) { @@ -82,11 +90,17 @@ public boolean equals(Object o) { return Objects.equals(key, that.key) && Objects.equals(value, that.value); } + /** + * Calculate the hash code of the node selector + */ @Override public int hashCode() { return Objects.hash(key, value); } + /** + * Convert this node selector to string representation + */ @Override public String toString() { return key + "=" + value; diff --git a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java deleted file mode 100644 index e2993e48a5a05..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpException; -import org.apache.http.HttpResponse; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.entity.ContentBufferEntity; -import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer; -import org.apache.http.nio.util.ByteBufferAllocator; -import org.apache.http.nio.util.HeapByteBufferAllocator; -import org.apache.http.nio.util.SimpleInputBuffer; -import org.apache.http.protocol.HttpContext; - -import java.io.IOException; - -/** - * Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole - * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. - * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer - * than the configured buffer limit. - */ -public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer { - - private final int bufferLimitBytes; - private volatile HttpResponse response; - private volatile SimpleInputBuffer buf; - - /** - * Creates a new instance of this consumer with the provided buffer limit. - * - * @param bufferLimit the buffer limit. Must be greater than 0. - * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. - */ - public HeapBufferedAsyncResponseConsumer(int bufferLimit) { - if (bufferLimit <= 0) { - throw new IllegalArgumentException("bufferLimit must be greater than 0"); - } - this.bufferLimitBytes = bufferLimit; - } - - /** - * Get the limit of the buffer. - */ - public int getBufferLimit() { - return bufferLimitBytes; - } - - @Override - protected void onResponseReceived(HttpResponse response) throws HttpException, IOException { - this.response = response; - } - - @Override - protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException { - long len = entity.getContentLength(); - if (len > bufferLimitBytes) { - throw new ContentTooLongException( - "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" - ); - } - if (len < 0) { - len = 4096; - } - this.buf = new SimpleInputBuffer((int) len, getByteBufferAllocator()); - this.response.setEntity(new ContentBufferEntity(entity, this.buf)); - } - - /** - * Returns the instance of {@link ByteBufferAllocator} to use for content buffering. - * Allows to plug in any {@link ByteBufferAllocator} implementation. 
- */ - protected ByteBufferAllocator getByteBufferAllocator() { - return HeapByteBufferAllocator.INSTANCE; - } - - @Override - protected void onContentReceived(ContentDecoder decoder, IOControl ioctrl) throws IOException { - this.buf.consumeContent(decoder); - } - - @Override - protected HttpResponse buildResult(HttpContext context) throws Exception { - return response; - } - - @Override - protected void releaseResources() { - response = null; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java index 7a56e03a1162c..6420a615484d0 100644 --- a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java +++ b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java @@ -32,30 +32,31 @@ package org.opensearch.client; -import org.apache.http.HttpResponse; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import static org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.DEFAULT_BUFFER_LIMIT; /** - * Factory used to create instances of {@link HttpAsyncResponseConsumer}. Each request retry needs its own instance of the + * Factory used to create instances of {@link AsyncResponseConsumer}. Each request retry needs its own instance of the * consumer object. Users can implement this interface and pass their own instance to the specialized * performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument. */ public interface HttpAsyncResponseConsumerFactory { /** - * Creates the default type of {@link HttpAsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. + * Creates the default type of {@link AsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. */ HttpAsyncResponseConsumerFactory DEFAULT = new HeapBufferedResponseConsumerFactory(DEFAULT_BUFFER_LIMIT); /** - * Creates the {@link HttpAsyncResponseConsumer}, called once per request attempt. + * Creates the {@link AsyncResponseConsumer}, called once per request attempt. */ - HttpAsyncResponseConsumer createHttpAsyncResponseConsumer(); + AsyncResponseConsumer createHttpAsyncResponseConsumer(); /** - * Default factory used to create instances of {@link HttpAsyncResponseConsumer}. + * Default factory used to create instances of {@link AsyncResponseConsumer}. * Creates one instance of {@link HeapBufferedAsyncResponseConsumer} for each request attempt, with a configurable * buffer limit which defaults to 100MB. */ @@ -75,8 +76,11 @@ public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) { this.bufferLimit = bufferLimitBytes; } + /** + * Creates the {@link AsyncResponseConsumer}, called once per request attempt. 
+ */ @Override - public HttpAsyncResponseConsumer createHttpAsyncResponseConsumer() { + public AsyncResponseConsumer createHttpAsyncResponseConsumer() { return new HeapBufferedAsyncResponseConsumer(bufferLimit); } } diff --git a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java deleted file mode 100644 index 52618cd7edc75..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - -import java.net.URI; - -/** - * Allows to send DELETE requests providing a body (not supported out of the box) - */ -final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { - - static final String METHOD_NAME = HttpDelete.METHOD_NAME; - - HttpDeleteWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java deleted file mode 100644 index 8ab639433f6be..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpGet; - -import java.net.URI; - -/** - * Allows to send GET requests providing a body (not supported out of the box) - */ -final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { - - static final String METHOD_NAME = HttpGet.METHOD_NAME; - - HttpGetWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index 2fa6605d57ad2..8fe5dcfa00db0 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.List; import java.util.Map; @@ -152,6 +152,9 @@ public Map> getAttributes() { return attributes; } + /** + * Convert node to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -174,6 +177,10 @@ public String toString() { return b.append(']').toString(); } + /** + * Compare two nodes for equality + * @param obj node instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -188,6 +195,9 @@ public boolean equals(Object obj) { && Objects.equals(attributes, other.attributes); } + /** + * Calculate the hash code of the node + */ @Override public int hashCode() { return Objects.hash(host, boundHosts, name, version, roles, attributes); @@ -246,11 +256,18 @@ public boolean isSearch() { return roles.contains("search"); } + /** + * Convert roles to string representation + */ @Override public String toString() { return String.join(",", roles); } + /** + * Compare two roles for equality + * @param obj roles instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -260,6 +277,9 @@ public boolean equals(Object obj) { return roles.equals(other.roles); } + /** + * Calculate the hash code of the roles + */ @Override public int hashCode() { return roles.hashCode(); diff --git a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java b/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java deleted file mode 100644 index 8a35d6eb607ca..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - * - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScheme; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.protocol.HttpContext; - -/** - * An {@link org.apache.http.client.AuthenticationStrategy} implementation that does not perform - * any special handling if authentication fails. - * The default handler in Apache HTTP client mimics standard browser behaviour of clearing authentication - * credentials if it receives a 401 response from the server. While this can be useful for browser, it is - * rarely the desired behaviour with the OpenSearch REST API. - * If the code using the REST client has configured credentials for the REST API, then we can and should - * assume that this is intentional, and those credentials represent the best possible authentication - * mechanism to the OpenSearch node. - * If we receive a 401 status, a probably cause is that the authentication mechanism in place was unable - * to perform the requisite password checks (the node has not yet recovered its state, or an external - * authentication provider was unavailable). - * If this occurs, then the desired behaviour is for the Rest client to retry with the same credentials - * (rather than trying with no credentials, or expecting the calling code to provide alternate credentials). - */ -final class PersistentCredentialsAuthenticationStrategy extends TargetAuthenticationStrategy { - - private final Log logger = LogFactory.getLog(PersistentCredentialsAuthenticationStrategy.class); - - @Override - public void authFailed(HttpHost host, AuthScheme authScheme, HttpContext context) { - if (logger.isDebugEnabled()) { - logger.debug( - "Authentication to " - + host - + " failed (scheme: " - + authScheme.getSchemeName() - + "). Preserving credentials for next request" - ); - } - // Do nothing. - // The superclass implementation of method will clear the credentials from the cache, but we don't - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java index ddec1da068bf0..7cf7490692650 100644 --- a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java @@ -58,6 +58,10 @@ public PreferHasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. 
+ * @param nodes the {@link Node}s targeted for the sending requests + */ @Override public void select(Iterable nodes) { boolean foundAtLeastOne = false; @@ -99,6 +103,10 @@ public void select(Iterable nodes) { } } + /** + * Compare two node selectors for equality + * @param o node selector instance to compare with + */ @Override public boolean equals(Object o) { if (this == o) { @@ -111,11 +119,17 @@ public boolean equals(Object o) { return Objects.equals(key, that.key) && Objects.equals(value, that.value); } + /** + * Calculate the hash code of the node selector + */ @Override public int hashCode() { return Objects.hash(key, value); } + /** + * Convert this node selector to string representation + */ @Override public String toString() { return key + "=" + value; diff --git a/client/rest/src/main/java/org/opensearch/client/Request.java b/client/rest/src/main/java/org/opensearch/client/Request.java index df81ca7f717ae..441b01b0891ad 100644 --- a/client/rest/src/main/java/org/opensearch/client/Request.java +++ b/client/rest/src/main/java/org/opensearch/client/Request.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import java.util.HashMap; import java.util.Map; @@ -133,7 +133,7 @@ public void setEntity(HttpEntity entity) { * @param entity JSON string to be set as the entity body of the request. */ public void setJsonEntity(String entity) { - setEntity(entity == null ? null : new NStringEntity(entity, ContentType.APPLICATION_JSON)); + setEntity(entity == null ? null : new StringEntity(entity, ContentType.APPLICATION_JSON)); } /** @@ -176,6 +176,9 @@ public RequestOptions getOptions() { return options; } + /** + * Convert request to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -192,6 +195,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two requests for equality + * @param obj request instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -209,6 +216,9 @@ public boolean equals(Object obj) { && options.equals(other.options); } + /** + * Calculate the hash code of the request + */ @Override public int hashCode() { return Objects.hash(method, endpoint, parameters, entity, options); diff --git a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java index 297885fa3131b..0f2e0e6da834d 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java @@ -34,16 +34,16 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import 
org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.StatusLine; import java.io.BufferedReader; import java.io.IOException; @@ -66,17 +66,10 @@ private RequestLogger() {} /** * Logs a request that yielded a response */ - static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, ClassicHttpResponse httpResponse) { if (logger.isDebugEnabled()) { logger.debug( - "request [" - + request.getMethod() - + " " - + host - + getUri(request.getRequestLine()) - + "] returned [" - + httpResponse.getStatusLine() - + "]" + "request [" + request.getMethod() + " " + host + getUri(request) + "] returned [" + new StatusLine(httpResponse) + "]" ); } if (logger.isWarnEnabled()) { @@ -109,7 +102,7 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR */ static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { if (logger.isDebugEnabled()) { - logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e); + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request) + "] failed", e); } if (tracer.isTraceEnabled()) { String traceRequest; @@ -127,7 +120,7 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ StringBuilder message = new StringBuilder("request [").append(request.getMethod()) .append(" ") .append(host) - .append(getUri(request.getRequestLine())) + .append(getUri(request)) .append("] returned ") .append(warnings.length) .append(" warnings: "); @@ -144,17 +137,18 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ * Creates curl output for given request */ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException { - String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'"; - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; - if (enclosingRequest.getEntity() != null) { - requestLine += " -d '"; - HttpEntity entity = enclosingRequest.getEntity(); - if (entity.isRepeatable() == false) { - entity = new BufferedHttpEntity(enclosingRequest.getEntity()); - enclosingRequest.setEntity(entity); - } + String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request) + "'"; + if (request.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = request.getEntity(); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(request.getEntity()); + request.setEntity(entity); + } + try { requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; + } catch (final ParseException ex) { + throw new IOException(ex); } } return requestLine; @@ -163,10 +157,10 @@ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IO /** * Creates curl output for given response */ - static String buildTraceResponse(HttpResponse httpResponse) throws IOException { + static String 
buildTraceResponse(ClassicHttpResponse httpResponse) throws IOException { StringBuilder responseLine = new StringBuilder(); - responseLine.append("# ").append(httpResponse.getStatusLine()); - for (Header header : httpResponse.getAllHeaders()) { + responseLine.append("# ").append(new StatusLine(httpResponse)); + for (Header header : httpResponse.getHeaders()) { responseLine.append("\n# ").append(header.getName()).append(": ").append(header.getValue()); } responseLine.append("\n#"); @@ -176,7 +170,7 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { entity = new BufferedHttpEntity(entity); } httpResponse.setEntity(entity); - ContentType contentType = ContentType.get(entity); + ContentType contentType = ContentType.parse(entity.getContentType()); Charset charset = StandardCharsets.UTF_8; if (contentType != null && contentType.getCharset() != null) { charset = contentType.getCharset(); @@ -191,10 +185,14 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { return responseLine.toString(); } - private static String getUri(RequestLine requestLine) { - if (requestLine.getUri().charAt(0) != '/') { - return "/" + requestLine.getUri(); + private static String getUri(HttpUriRequest request) { + final String uri = request.getRequestUri(); + if (uri == null) { + return "/"; + } else if (!uri.startsWith("/")) { + return "/" + uri; + } else { + return uri; } - return requestLine.getUri(); } } diff --git a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java index 5390e303ff499..189d785faaf45 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -86,7 +86,7 @@ public List
<Header>
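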
getHeaders() { /** * The {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. */ @@ -124,6 +124,9 @@ public RequestConfig getRequestConfig() { return requestConfig; } + /** + * Convert request options to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -152,6 +155,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two request options for equality + * @param obj request options instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -167,6 +174,9 @@ public boolean equals(Object obj) { && Objects.equals(warningsHandler, other.warningsHandler); } + /** + * Calculate the hash code of the request options + */ @Override public int hashCode() { return Objects.hash(headers, httpAsyncResponseConsumerFactory, warningsHandler); @@ -218,11 +228,11 @@ public Builder addHeader(String name, String value) { /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. * - * @param httpAsyncResponseConsumerFactory factory for creating {@link HttpAsyncResponseConsumer}. + * @param httpAsyncResponseConsumerFactory factory for creating {@link AsyncResponseConsumer}. * @throws NullPointerException if {@code httpAsyncResponseConsumerFactory} is null. 
*/ public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java index d380607b7df9e..c758826b776ba 100644 --- a/client/rest/src/main/java/org/opensearch/client/Response.java +++ b/client/rest/src/main/java/org/opensearch/client/Response.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import java.util.ArrayList; import java.util.List; @@ -53,9 +54,9 @@ public class Response { private final RequestLine requestLine; private final HttpHost host; - private final HttpResponse response; + private final ClassicHttpResponse response; - Response(RequestLine requestLine, HttpHost host, HttpResponse response) { + Response(RequestLine requestLine, HttpHost host, ClassicHttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); @@ -82,14 +83,14 @@ public HttpHost getHost() { * Returns the status line of the current response */ public StatusLine getStatusLine() { - return response.getStatusLine(); + return new StatusLine(response); } /** * Returns all the response headers */ public Header[] getHeaders() { - return response.getAllHeaders(); + return response.getHeaders(); } /** @@ -199,12 +200,15 @@ public boolean hasWarnings() { return warnings != null && warnings.length > 0; } - HttpResponse getHttpResponse() { + ClassicHttpResponse getHttpResponse() { return response; } + /** + * Convert response to string representation + */ @Override public String toString() { - return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + '}'; + return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + getStatusLine() + '}'; } } diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseException.java b/client/rest/src/main/java/org/opensearch/client/ResponseException.java index 8104c32c422e5..ed816c7e1177e 100644 --- a/client/rest/src/main/java/org/opensearch/client/ResponseException.java +++ b/client/rest/src/main/java/org/opensearch/client/ResponseException.java @@ -32,9 +32,10 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import java.io.IOException; import java.util.Locale; @@ -77,7 +78,11 @@ static String buildMessage(Response response) throws IOException { entity = new BufferedHttpEntity(entity); response.getHttpResponse().setEntity(entity); } - message += "\n" + EntityUtils.toString(entity); + try { + message += 
"\n" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } return message; } diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 92aed2c8fb179..9d140a145b004 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -33,36 +33,43 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.entity.HttpEntityWrapper; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.client.AuthCache; -import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.entity.GzipCompressingEntity; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.nio.client.methods.HttpAsyncMethods; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.auth.AuthScheme; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.Credentials; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.ClientProtocolException; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import 
org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import javax.net.ssl.SSLHandshakeException; import java.io.ByteArrayInputStream; @@ -70,6 +77,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; @@ -92,6 +100,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; @@ -218,7 +227,7 @@ public static RestClientBuilder builder(String cloudId) { } String url = decodedParts[1] + "." + domain; - return builder(new HttpHost(url, port, "https")); + return builder(new HttpHost("https", url, port)); } /** @@ -287,7 +296,7 @@ public List getNodes() { * @return client running status */ public boolean isRunning() { - return client.isRunning(); + return client.getStatus() == IOReactorStatus.ACTIVE; } /** @@ -323,7 +332,7 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - HttpResponse httpResponse; + ClassicHttpResponse httpResponse; try { httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); } catch (Exception e) { @@ -353,18 +362,18 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } - private ResponseOrResponseException convertResponse(InternalRequest request, Node node, HttpResponse httpResponse) throws IOException { + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) + throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); - int statusCode = httpResponse.getStatusLine().getStatusCode(); + int statusCode = httpResponse.getCode(); Optional.ofNullable(httpResponse.getEntity()) .map(HttpEntity::getContentEncoding) - .map(Header::getValue) .filter("gzip"::equalsIgnoreCase) .map(gzipHeaderValue -> new GzipDecompressingEntity(httpResponse.getEntity())) .ifPresent(httpResponse::setEntity); - Response response = new Response(request.httpRequest.getRequestLine(), node.getHost(), httpResponse); + Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { onResponse(node); if 
(request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { @@ -418,47 +427,56 @@ private void performRequestAsync( ) { request.cancellable.runIfNotCancelled(() -> { final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback() { - @Override - public void completed(HttpResponse httpResponse) { - try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); - if (responseOrResponseException.responseException == null) { - listener.onSuccess(responseOrResponseException.response); - } else { + Future future = client.execute( + context.requestProducer, + context.asyncResponseConsumer, + context.context, + new FutureCallback() { + @Override + public void completed(ClassicHttpResponse httpResponse) { + try { + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + if (responseOrResponseException.responseException == null) { + listener.onSuccess(responseOrResponseException.response); + } else { + if (nodeTuple.nodes.hasNext()) { + listener.trackFailure(responseOrResponseException.responseException); + performRequestAsync(nodeTuple, request, listener); + } else { + listener.onDefinitiveFailure(responseOrResponseException.responseException); + } + } + } catch (Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void failed(Exception failure) { + try { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); + onFailure(context.node); if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(responseOrResponseException.responseException); + listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); } else { - listener.onDefinitiveFailure(responseOrResponseException.responseException); + listener.onDefinitiveFailure(failure); } + } catch (Exception e) { + listener.onDefinitiveFailure(e); } - } catch (Exception e) { - listener.onDefinitiveFailure(e); } - } - @Override - public void failed(Exception failure) { - try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); - if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(failure); - performRequestAsync(nodeTuple, request, listener); - } else { - listener.onDefinitiveFailure(failure); - } - } catch (Exception e) { - listener.onDefinitiveFailure(e); + @Override + public void cancelled() { + listener.onDefinitiveFailure(Cancellable.newCancellationException()); } } + ); - @Override - public void cancelled() { - listener.onDefinitiveFailure(Cancellable.newCancellationException()); - } - }); + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } }); } @@ -583,6 +601,9 @@ private void onFailure(Node node) { failureListener.onFailure(node); } + /** + * Close the underlying {@link CloseableHttpAsyncClient} instance + */ @Override public void close() throws IOException { client.close(); @@ -608,12 +629,12 @@ private static void addSuppressedException(Exception suppressedException, Except } } - private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { + private HttpUriRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { switch (method.toUpperCase(Locale.ROOT)) { - case 
HttpDeleteWithEntity.METHOD_NAME: - return addRequestBody(new HttpDeleteWithEntity(uri), entity); - case HttpGetWithEntity.METHOD_NAME: - return addRequestBody(new HttpGetWithEntity(uri), entity); + case HttpDelete.METHOD_NAME: + return addRequestBody(new HttpDelete(uri), entity); + case HttpGet.METHOD_NAME: + return addRequestBody(new HttpGet(uri), entity); case HttpHead.METHOD_NAME: return addRequestBody(new HttpHead(uri), entity); case HttpOptions.METHOD_NAME: @@ -633,22 +654,18 @@ private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity ent } } - private HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) { + private HttpUriRequestBase addRequestBody(HttpUriRequestBase httpRequest, HttpEntity entity) { if (entity != null) { - if (httpRequest instanceof HttpEntityEnclosingRequestBase) { - if (compressionEnabled) { - if (chunkedEnabled.isPresent()) { - entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); - } else { - entity = new ContentCompressingEntity(entity); - } - } else if (chunkedEnabled.isPresent()) { - entity = new ContentHttpEntity(entity, chunkedEnabled.get()); + if (compressionEnabled) { + if (chunkedEnabled.isPresent()) { + entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); + } else { + entity = new ContentCompressingEntity(entity); } - ((HttpEntityEnclosingRequestBase) httpRequest).setEntity(entity); - } else { - throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported"); + } else if (chunkedEnabled.isPresent()) { + entity = new ContentHttpEntity(entity, chunkedEnabled.get()); } + httpRequest.setEntity(entity); } return httpRequest; } @@ -673,7 +690,12 @@ static URI buildUri(String pathPrefix, String path, Map params) for (Map.Entry param : params.entrySet()) { uriBuilder.addParameter(param.getKey(), param.getValue()); } - return uriBuilder.build(); + + // The Apache HttpClient 5.x **does not** encode URIs but Apache HttpClient 4.x does. It leads + // to the issues with Unicode characters (f.e. document IDs could contain Unicode characters) and + // weird characters are being passed instead. By using `toASCIIString()`, the URI is already created + // with proper encoding. + return new URI(uriBuilder.build().toASCIIString()); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage(), e); } @@ -802,7 +824,7 @@ public void remove() { private class InternalRequest { private final Request request; private final Set ignoreErrorCodes; - private final HttpRequestBase httpRequest; + private final HttpUriRequestBase httpRequest; private final Cancellable cancellable; private final WarningsHandler warningsHandler; @@ -839,7 +861,7 @@ private void setHeaders(HttpRequest httpRequest, Collection
requestHeade } } - private void setRequestConfig(HttpRequestBase httpRequest, RequestConfig requestConfig) { + private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) { if (requestConfig != null) { httpRequest.setConfig(requestConfig); } @@ -851,21 +873,81 @@ RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { } } + /** + * The Apache HttpClient 5 adds "Authorization" header even if the credentials for basic authentication are not provided + * (effectively, username and password are 'null'). To workaround that, wrapping the AuthCache around current HttpClientContext + * and ensuring that the credentials are indeed provided for particular HttpHost, otherwise returning no authentication scheme + * even if it is present in the cache. + */ + private static class WrappingAuthCache implements AuthCache { + private final HttpClientContext context; + private final AuthCache delegate; + private final boolean usePersistentCredentials = true; + + public WrappingAuthCache(HttpClientContext context, AuthCache delegate) { + this.context = context; + this.delegate = delegate; + } + + @Override + public void put(HttpHost host, AuthScheme authScheme) { + delegate.put(host, authScheme); + } + + @Override + public AuthScheme get(HttpHost host) { + AuthScheme authScheme = delegate.get(host); + + if (authScheme != null) { + final CredentialsProvider credsProvider = context.getCredentialsProvider(); + if (credsProvider != null) { + final String schemeName = authScheme.getName(); + final AuthScope authScope = new AuthScope(host, null, schemeName); + final Credentials creds = credsProvider.getCredentials(authScope, context); + + // See please https://issues.apache.org/jira/browse/HTTPCLIENT-2203 + if (authScheme instanceof BasicScheme) { + ((BasicScheme) authScheme).initPreemptive(creds); + } + + if (creds == null) { + return null; + } + } + } + + return authScheme; + } + + @Override + public void remove(HttpHost host) { + if (!usePersistentCredentials) { + delegate.remove(host); + } + } + + @Override + public void clear() { + delegate.clear(); + } + + } + private static class RequestContext { private final Node node; - private final HttpAsyncRequestProducer requestProducer; - private final HttpAsyncResponseConsumer asyncResponseConsumer; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; RequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it - this.requestProducer = HttpAsyncMethods.create(node.getHost(), request.httpRequest); + this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); this.asyncResponseConsumer = request.request.getOptions() .getHttpAsyncResponseConsumerFactory() .createHttpAsyncResponseConsumer(); this.context = HttpClientContext.create(); - context.setAuthCache(authCache); + context.setAuthCache(new WrappingAuthCache(context, authCache)); } } @@ -966,7 +1048,9 @@ private static Exception extractAndWrapCause(Exception exception) { /** * A gzip compressing entity that also implements {@code getContent()}. 
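 * <p>Instances are created internally when request compression is enabled on the
 * client; a sketch of opting in (assuming the builder exposes
 * {@code setCompressionEnabled}):
 * <pre>{@code
 * RestClient client = RestClient.builder(new HttpHost("http", "localhost", 9200))
 *     .setCompressionEnabled(true)
 *     .build();
 * }</pre>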
*/ - public static class ContentCompressingEntity extends GzipCompressingEntity { + public static class ContentCompressingEntity extends HttpEntityWrapper { + private static final String GZIP_CODEC = "gzip"; + private Optional chunkedEnabled; /** @@ -979,6 +1063,14 @@ public ContentCompressingEntity(HttpEntity entity) { this.chunkedEnabled = Optional.empty(); } + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return GZIP_CODEC; + } + /** * Creates a {@link ContentCompressingEntity} instance with the provided HTTP entity. * @@ -990,11 +1082,14 @@ public ContentCompressingEntity(HttpEntity entity, boolean chunkedEnabled) { this.chunkedEnabled = Optional.of(chunkedEnabled); } + /** + * Returns a content stream of the entity. + */ @Override public InputStream getContent() throws IOException { ByteArrayInputOutputStream out = new ByteArrayInputOutputStream(1024); try (GZIPOutputStream gzipOut = new GZIPOutputStream(out)) { - wrappedEntity.writeTo(gzipOut); + super.writeTo(gzipOut); } return out.asInput(); } @@ -1030,9 +1125,24 @@ public long getContentLength() { return size; } } else { - return super.getContentLength(); + return -1; } } + + /** + * Writes the entity content out to the output stream. + * @param outStream the output stream to write entity content to + * @throws IOException if an I/O error occurs + */ + @Override + public void writeTo(final OutputStream outStream) throws IOException { + Args.notNull(outStream, "Output stream"); + final GZIPOutputStream gzip = new GZIPOutputStream(outStream); + super.writeTo(gzip); + // Only close output stream if the wrapped entity has been + // successfully written out + gzip.close(); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 8841d371754c3..679a7ccb17d49 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,15 +32,23 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.util.Timeout; +import org.apache.hc.client5.http.async.HttpAsyncClient; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import javax.net.ssl.SSLContext; + import java.security.AccessController; import java.security.NoSuchAlgorithmException; import 
java.security.PrivilegedAction; @@ -50,19 +58,19 @@ /** * Helps creating a new {@link RestClient}. Allows to set the most common http client configuration options when internally - * creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created - * {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed. + * creating the underlying {@link HttpAsyncClient}. Also allows to provide an externally created + * {@link HttpAsyncClient} in case additional customization is needed. */ public final class RestClientBuilder { /** - * The default connection timout in milliseconds. + * The default connection timeout in milliseconds. */ public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000; /** - * The default socket timeout in milliseconds. + * The default response timeout in milliseconds. */ - public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000; + public static final int DEFAULT_RESPONSE_TIMEOUT_MILLIS = 30000; /** * The default maximum of connections per route. @@ -296,20 +304,26 @@ public RestClient build() { private CloseableHttpAsyncClient createHttpClient() { // default timeouts are all infinite RequestConfig.Builder requestConfigBuilder = RequestConfig.custom() - .setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS) - .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS); + .setConnectTimeout(Timeout.ofMilliseconds(DEFAULT_CONNECT_TIMEOUT_MILLIS)) + .setResponseTimeout(Timeout.ofMilliseconds(DEFAULT_RESPONSE_TIMEOUT_MILLIS)); if (requestConfigCallback != null) { requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder); } try { - HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() - .setDefaultRequestConfig(requestConfigBuilder.build()) - // default settings for connection pooling may be too constraining + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(SSLContext.getDefault()).build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) .setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) - .setSSLContext(SSLContext.getDefault()) - .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); + .setTlsStrategy(tlsStrategy) + .build(); + + HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() + .setDefaultRequestConfig(requestConfigBuilder.build()) + .setConnectionManager(connectionManager) + .setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE) + .disableAutomaticRetries(); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } @@ -344,9 +358,9 @@ public interface RequestConfigCallback { public interface HttpClientConfigCallback { /** * Allows to customize the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}. - * Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication - * or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default - * value that the {@link RestClientBuilder} internally sets, like connection pooling. 
+ * Commonly used to customize the default {@link CredentialsProvider} for authentication for communication + * through TLS/SSL without losing any other useful default value that the {@link RestClientBuilder} internally + * sets, like connection pooling. * * @param httpClientBuilder the {@link HttpClientBuilder} for customizing the client instance. */ diff --git a/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java new file mode 100644 index 0000000000000..a65427cd0b032 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.nio.HttpEntityAsyncEntityProducer; + +/** + * The producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class HttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + HttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @return new request producer + */ + public static HttpUriRequestProducer create(final HttpUriRequestBase request, final HttpHost host) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final HttpEntity entity = request.getEntity(); + AsyncEntityProducer entityProducer = null; + + if (entity != null) { + entityProducer = new HttpEntityAsyncEntityProducer(entity); + } + + return new HttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/http/package-info.java b/client/rest/src/main/java/org/opensearch/client/http/package-info.java new file mode 100644 index 0000000000000..32e0aa2016d53 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * HTTP support classes for REST client. 
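+ * <p>
+ * A minimal, illustrative sketch of the intended use (the host, method and path here are
+ * assumptions for the example, not part of this change):
+ * <pre>{@code
+ * HttpHost host = new HttpHost("http", "localhost", 9200);
+ * HttpUriRequestBase request = new HttpUriRequestBase("GET", URI.create("/"));
+ * // binds the request to the target host and, if an entity is set, wraps it in an async producer
+ * HttpUriRequestProducer producer = HttpUriRequestProducer.create(request, host);
+ * }</pre>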
+ */ +package org.opensearch.client.http; diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java new file mode 100644 index 0000000000000..9bd17d1c24c7e --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpException; +import org.apache.hc.core5.http.nio.AsyncEntityConsumer; +import org.apache.hc.core5.http.nio.entity.AbstractBinAsyncEntityConsumer; +import org.apache.hc.core5.util.ByteArrayBuffer; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Default implementation of {@link AsyncEntityConsumer}. Buffers the whole + * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. + */ +public class HeapBufferedAsyncEntityConsumer extends AbstractBinAsyncEntityConsumer { + + private final int bufferLimitBytes; + private AtomicReference bufferRef = new AtomicReference<>(); + + /** + * Creates a new instance of this consumer with the provided buffer limit. + * + * @param bufferLimit the buffer limit. Must be greater than 0. + * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. + */ + public HeapBufferedAsyncEntityConsumer(int bufferLimit) { + if (bufferLimit <= 0) { + throw new IllegalArgumentException("bufferLimit must be greater than 0"); + } + this.bufferLimitBytes = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimitBytes; + } + + /** + * Triggered to signal beginning of entity content stream. + * + * @param contentType the entity content type + */ + @Override + protected void streamStart(final ContentType contentType) throws HttpException, IOException {} + + /** + * Triggered to obtain the capacity increment. + * + * @return the number of bytes this consumer is prepared to process. + */ + @Override + protected int capacityIncrement() { + return Integer.MAX_VALUE; + } + + /** + * Triggered to pass incoming data packet to the data consumer. + * + * @param src the data packet. + * @param endOfStream flag indicating whether this data packet is the last in the data stream. 
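+ * <p>
+ * Illustrative sketch of the limit check: with {@code new HeapBufferedAsyncEntityConsumer(1024)},
+ * a first 600-byte packet is buffered, while a second 600-byte packet would take the total past
+ * the 1024-byte limit and raises {@link ContentTooLongException}.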
+ * + */ + @Override + protected void data(final ByteBuffer src, final boolean endOfStream) throws IOException { + if (src == null) { + return; + } + + ByteArrayBuffer buffer = bufferRef.get(); + if (buffer == null) { + buffer = new ByteArrayBuffer(bufferLimitBytes); + if (bufferRef.compareAndSet(null, buffer) == false) { + buffer = bufferRef.get(); + } + } + + int len = src.limit(); + if (buffer.length() + len > bufferLimitBytes) { + throw new ContentTooLongException( + "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" + ); + } + + if (len < 0) { + len = 4096; + } + + if (src.hasArray()) { + buffer.append(src.array(), src.arrayOffset() + src.position(), src.remaining()); + } else { + while (src.hasRemaining()) { + buffer.append(src.get()); + } + } + } + + /** + * Triggered to generate entity representation. + * + * @return the entity content + */ + @Override + protected byte[] generateContent() throws IOException { + final ByteArrayBuffer buffer = bufferRef.get(); + return buffer == null ? new byte[0] : buffer.toByteArray(); + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + ByteArrayBuffer buffer = bufferRef.getAndSet(null); + if (buffer != null) { + buffer.clear(); + buffer = null; + } + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java new file mode 100644 index 0000000000000..3d93478f49f99 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.client.nio; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpException; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.support.AbstractAsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; + +import java.io.IOException; + +/** + * Default implementation of {@link AsyncResponseConsumer}. Buffers the whole + * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. + */ +public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer { + private static final Log LOGGER = LogFactory.getLog(HeapBufferedAsyncResponseConsumer.class); + private final int bufferLimit; + + /** + * Creates a new instance of this consumer with the provided buffer limit. + * + * @param bufferLimit the buffer limit. Must be greater than 0. + * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. + */ + public HeapBufferedAsyncResponseConsumer(int bufferLimit) { + super(new HeapBufferedAsyncEntityConsumer(bufferLimit)); + this.bufferLimit = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimit; + } + + /** + * Triggered to signal receipt of an intermediate (1xx) HTTP response. + * + * @param response the intermediate (1xx) HTTP response. + * @param context the actual execution context. + */ + @Override + public void informationResponse(final HttpResponse response, final HttpContext context) throws HttpException, IOException {} + + /** + * Triggered to generate object that represents a result of response message processing. + * @param response the response message. + * @param entity the response entity. + * @param contentType the response content type. + * @return the result of response processing. 
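+ * <p>
+ * The buffered bytes are re-wrapped in a {@code ByteArrayEntity} that carries the original
+ * content type and, when the header is present, the {@code Content-Encoding} of the streamed
+ * response.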
+ */ + @Override + protected ClassicHttpResponse buildResult(final HttpResponse response, final byte[] entity, final ContentType contentType) { + final ClassicHttpResponse classicResponse = new BasicClassicHttpResponse(response.getCode()); + classicResponse.setVersion(response.getVersion()); + classicResponse.setHeaders(response.getHeaders()); + classicResponse.setReasonPhrase(response.getReasonPhrase()); + if (response.getLocale() != null) { + classicResponse.setLocale(response.getLocale()); + } + + if (entity != null) { + String encoding = null; + + try { + final Header contentEncoding = response.getHeader(HttpHeaders.CONTENT_ENCODING); + if (contentEncoding != null) { + encoding = contentEncoding.getValue(); + } + } catch (final HttpException ex) { + LOGGER.debug("Unable to detect content encoding", ex); + } + + final ByteArrayEntity httpEntity = new ByteArrayEntity(entity, contentType, encoding); + classicResponse.setEntity(httpEntity); + } + + return classicResponse; + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java new file mode 100644 index 0000000000000..81fe77ddcfbed --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.DataStreamChannel; +import org.apache.hc.core5.http.nio.ResourceHolder; +import org.apache.hc.core5.util.Args; +import org.apache.hc.core5.util.Asserts; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * The {@link AsyncEntityProducer} implementation for {@link HttpEntity} + */ +public class HttpEntityAsyncEntityProducer implements AsyncEntityProducer { + + private final HttpEntity entity; + private final ByteBuffer byteBuffer; + private final boolean chunked; + private final AtomicReference exception; + private final AtomicReference channelRef; + private boolean eof; + + /** + * Create new async HTTP entity producer + * @param entity HTTP entity + * @param bufferSize buffer size + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity, final int bufferSize) { + this.entity = Args.notNull(entity, "Http Entity"); + this.byteBuffer = ByteBuffer.allocate(bufferSize); + this.chunked = entity.isChunked(); + this.exception = new AtomicReference<>(); + this.channelRef = new AtomicReference<>(); + } + + /** + * Create new async HTTP entity producer with default buffer size (8192 bytes) + * @param entity HTTP entity + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity) { + this(entity, 8192); + } + + /** + * Determines whether the producer can consistently produce the same content + * after invocation of {@link ResourceHolder#releaseResources()}. + */ + @Override + public boolean isRepeatable() { + return entity.isRepeatable(); + } + + /** + * Returns content type of the entity, if known. 
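+ * Delegates to the wrapped {@link HttpEntity#getContentType()}.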
+ */ + @Override + public String getContentType() { + return entity.getContentType(); + } + + /** + * Returns length of the entity, if known. + */ + @Override + public long getContentLength() { + return entity.getContentLength(); + } + + /** + * Returns the number of bytes immediately available for output. + * This method can be used as a hint to control output events + * of the underlying I/O session. + * + * @return the number of bytes immediately available for output + */ + @Override + public int available() { + return Integer.MAX_VALUE; + } + + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return entity.getContentEncoding(); + } + + /** + * Returns chunked transfer hint for this entity. + *
+ * The behavior of wrapping entities is implementation dependent, + * but should respect the primary purpose.
+ */ + @Override + public boolean isChunked() { + return chunked; + } + + /** + * Preliminary declaration of trailing headers. + */ + @Override + public Set getTrailerNames() { + return entity.getTrailerNames(); + } + + /** + * Triggered to signal the ability of the underlying data channel + * to accept more data. The data producer can choose to write data + * immediately inside the call or asynchronously at some later point. + * + * @param channel the data channel capable to accepting more data. + */ + @Override + public void produce(final DataStreamChannel channel) throws IOException { + ReadableByteChannel stream = channelRef.get(); + if (stream == null) { + stream = Channels.newChannel(entity.getContent()); + Asserts.check(channelRef.getAndSet(stream) == null, "Illegal producer state"); + } + if (!eof) { + final int bytesRead = stream.read(byteBuffer); + if (bytesRead < 0) { + eof = true; + } + } + if (byteBuffer.position() > 0) { + byteBuffer.flip(); + channel.write(byteBuffer); + byteBuffer.compact(); + } + if (eof && byteBuffer.position() == 0) { + channel.endStream(); + releaseResources(); + } + } + + /** + * Triggered to signal a failure in data generation. + * + * @param cause the cause of the failure. + */ + @Override + public void failed(final Exception cause) { + if (exception.compareAndSet(null, cause)) { + releaseResources(); + } + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + eof = false; + final ReadableByteChannel stream = channelRef.getAndSet(null); + if (stream != null) { + try { + stream.close(); + } catch (final IOException ex) { + /* Close quietly */ + } + } + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/package-info.java b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java new file mode 100644 index 0000000000000..ce4961ed21f7c --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * NIO support classes for REST client. 
+ */ +package org.opensearch.client.nio; diff --git a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java index 0a997a586acc9..9722ec867a376 100644 --- a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java @@ -32,14 +32,11 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import java.util.concurrent.atomic.AtomicReference; @@ -116,9 +113,8 @@ public void onFailure(Exception exception) { private static Response mockResponse() { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse); } } diff --git a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java index fd18bba6ee548..b5aca86e95d6c 100644 --- a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java index 22852fe4cb793..ed329d973eb78 100644 --- a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java @@ -32,34 +32,31 @@ package org.opensearch.client; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.protocol.HttpContext; - +import org.apache.hc.core5.http.ClassicHttpResponse; +import 
org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.EntityDetails; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.impl.BasicEntityDetails; +import org.apache.hc.core5.http.io.entity.AbstractHttpEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { @@ -67,33 +64,6 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024; private static final int TEST_BUFFER_LIMIT = 10 * 1024 * 1024; - public void testResponseProcessing() throws Exception { - ContentDecoder contentDecoder = mock(ContentDecoder.class); - IOControl ioControl = mock(IOControl.class); - HttpContext httpContext = mock(HttpContext.class); - - HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT)); - - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN)); - - // everything goes well - consumer.responseReceived(httpResponse); - consumer.consumeContent(contentDecoder, ioControl); - consumer.responseCompleted(httpContext); - - verify(consumer).releaseResources(); - verify(consumer).buildResult(httpContext); - assertTrue(consumer.isDone()); - assertSame(httpResponse, consumer.getResult()); - - consumer.responseCompleted(httpContext); - verify(consumer, times(1)).releaseResources(); - verify(consumer, times(1)).buildResult(httpContext); - } - public void testDefaultBufferLimit() throws Exception { HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT); bufferLimitTest(consumer, TEST_BUFFER_LIMIT); @@ -127,7 +97,7 @@ public void testCanConfigureHeapBufferLimitFromOutsidePackage() throws ClassNotF assertThat(object, instanceOf(HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.class)); HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory = (HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory) object; - HttpAsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); + AsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); assertThat(consumer, instanceOf(HeapBufferedAsyncResponseConsumer.class)); 
HeapBufferedAsyncResponseConsumer bufferedAsyncResponseConsumer = (HeapBufferedAsyncResponseConsumer) consumer; assertEquals(bufferLimit, bufferedAsyncResponseConsumer.getBufferLimit()); @@ -138,23 +108,40 @@ public void testHttpAsyncResponseConsumerFactoryVisibility() throws ClassNotFoun } private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception { - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - consumer.onResponseReceived(new BasicHttpResponse(statusLine)); + HttpContext httpContext = mock(HttpContext.class); + + BasicClassicHttpResponse response = new BasicClassicHttpResponse(200, "OK"); + consumer.consumeResponse(response, null, httpContext, null); final AtomicReference contentLength = new AtomicReference<>(); - HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) { + HttpEntity entity = new AbstractHttpEntity(ContentType.APPLICATION_JSON, null, false) { @Override public long getContentLength() { return contentLength.get(); } + + @Override + public InputStream getContent() throws IOException, UnsupportedOperationException { + return new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); + } + + @Override + public boolean isStreaming() { + return false; + } + + @Override + public void close() throws IOException {} }; contentLength.set(randomLongBetween(0L, bufferLimit)); - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + response.setEntity(entity); + + final EntityDetails details = new BasicEntityDetails(4096, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE)); try { - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); } catch (ContentTooLongException e) { assertEquals( "entity content is too long [" + entity.getContentLength() + "] for the configured buffer limit [" + bufferLimit + "]", diff --git a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java index 2b256e7205397..0e454c6f919f5 100644 --- a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.HashSet; import java.util.List; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java index 65a831e59bfb0..cfc95f0281bcc 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 296c4a1f09122..748bec5fb7de5 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ 
b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.Arrays; diff --git a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java index 0135cde573743..7dde1b96b3b45 100644 --- a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java index 3c317db1b72d9..8dea2ad922bd6 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java @@ -32,27 +32,29 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.BasicHeader; +import 
org.apache.hc.core5.http.message.StatusLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -66,8 +68,8 @@ import static org.junit.Assert.assertThat; public class RequestLoggerTests extends RestClientTestCase { - public void testTraceRequest() throws IOException, URISyntaxException { - HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https"); + public void testTraceRequest() throws IOException, URISyntaxException, ParseException { + HttpHost host = new HttpHost(randomBoolean() ? "http" : "https", "localhost", 9200); String expectedEndpoint = "/index/type/_api"; URI uri; if (randomBoolean()) { @@ -77,11 +79,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { } HttpUriRequest request = randomHttpRequest(uri); String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'"; - boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean(); + boolean hasBody = !(request instanceof HttpTrace) && randomBoolean(); String requestBody = "{ \"field\": \"value\" }"; if (hasBody) { expected += " -d '" + requestBody + "'"; - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; switch (randomIntBetween(0, 4)) { case 0: @@ -94,10 +95,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { ); break; case 2: - entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); break; case 3: - entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); + entity = new ByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); break; case 4: // Evil entity without a charset @@ -106,24 +107,24 @@ public void testTraceRequest() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(); } - enclosingRequest.setEntity(entity); + request.setEntity(entity); } String traceRequest = RequestLogger.buildTraceRequest(request, host); assertThat(traceRequest, equalTo(expected)); if (hasBody) { // check that the body is still readable as most entities are not repeatable - String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8); + String body = EntityUtils.toString(request.getEntity(), StandardCharsets.UTF_8); assertThat(body, equalTo(requestBody)); } } - public void testTraceResponse() throws IOException { + public void testTraceResponse() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); int statusCode = randomIntBetween(200, 599); String reasonPhrase = "REASON"; - BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); + StatusLine statusLine = new StatusLine(protocolVersion, statusCode, reasonPhrase); String expected = "# " + statusLine.toString(); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(statusCode, reasonPhrase); int numHeaders = randomIntBetween(0, 3); for (int i = 0; i < numHeaders; i++) { httpResponse.setHeader("header" + i, "value"); @@ -192,13 +193,13 @@ private static HttpUriRequest randomHttpRequest(URI uri) { int requestType = randomIntBetween(0, 7); switch (requestType) { case 0: - return new HttpGetWithEntity(uri); + return new HttpGet(uri); case 1: return new HttpPost(uri); case 2: return new 
HttpPut(uri); case 3: - return new HttpDeleteWithEntity(uri); + return new HttpDelete(uri); case 4: return new HttpHead(uri); case 5: diff --git a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java index aaa40db1442ee..a7f9a48c73393 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java @@ -32,8 +32,9 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -108,15 +109,15 @@ public void testSetRequestBuilder() { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); RequestConfig.Builder requestConfigBuilder = RequestConfig.custom(); - int socketTimeout = 10000; - int connectTimeout = 100; - requestConfigBuilder.setSocketTimeout(socketTimeout).setConnectTimeout(connectTimeout); + Timeout responseTimeout = Timeout.ofMilliseconds(10000); + Timeout connectTimeout = Timeout.ofMilliseconds(100); + requestConfigBuilder.setResponseTimeout(responseTimeout).setConnectTimeout(connectTimeout); RequestConfig requestConfig = requestConfigBuilder.build(); builder.setRequestConfig(requestConfig); RequestOptions options = builder.build(); assertSame(options.getRequestConfig(), requestConfig); - assertEquals(options.getRequestConfig().getSocketTimeout(), socketTimeout); + assertEquals(options.getRequestConfig().getResponseTimeout(), responseTimeout); assertEquals(options.getRequestConfig().getConnectTimeout(), connectTimeout); } diff --git a/client/rest/src/test/java/org/opensearch/client/RequestTests.java b/client/rest/src/test/java/org/opensearch/client/RequestTests.java index ba15c0d0b733c..d11982e9f9642 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestTests.java @@ -32,15 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; @@ -133,7 +135,7 @@ public void testSetJsonEntity() throws IOException { final String json = randomAsciiLettersOfLengthBetween(1, 100); request.setJsonEntity(json); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); ByteArrayOutputStream os = new ByteArrayOutputStream(); request.getEntity().writeTo(os); assertEquals(json, new 
String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); @@ -201,7 +203,10 @@ private static Request randomRequest() { randomFrom( new HttpEntity[] { new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), - new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new InputStreamEntity( + new ByteArrayInputStream(randomAsciiAlphanumOfLength(10).getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ), new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) } ) ); diff --git a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java index 8ecd3e1a29c99..dfbf105637962 100644 --- a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java @@ -32,19 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -57,10 +55,9 @@ public class ResponseExceptionTests extends RestClientTestCase { - public void testResponseException() throws IOException { + public void testResponseException() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 500, "Internal Server Error"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(500, "Internal Server Error"); String responseBody = "{\"error\":{\"root_cause\": {}}}"; boolean hasBody = getRandom().nextBoolean(); @@ -78,7 +75,7 @@ public void testResponseException() throws IOException { httpResponse.setEntity(entity); } - RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); HttpHost httpHost = new HttpHost("localhost", 9200); Response response = new Response(requestLine, httpHost, httpResponse); ResponseException responseException = new ResponseException(response); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java index 10bf9568c8798..f5e1735042e66 100644 --- 
a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java @@ -36,7 +36,8 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -117,7 +118,7 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { private RestClient buildRestClient() { InetSocketAddress address = httpsServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "https")).build(); + return RestClient.builder(new HttpHost("https", address.getHostString(), address.getPort())).build(); } private static SSLContext getSslContext() throws Exception { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java index ac81cd1132a2f..7165174e688e1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java @@ -32,11 +32,12 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.util.Timeout; import java.io.IOException; import java.util.Base64; @@ -271,7 +272,7 @@ public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder reques RequestConfig requestConfig = requestConfigBuilder.build(); assertEquals(RequestConfig.DEFAULT.getConnectionRequestTimeout(), requestConfig.getConnectionRequestTimeout()); // this way we get notified if the default ever changes - assertEquals(-1, requestConfig.getConnectionRequestTimeout()); + assertEquals(Timeout.ofMinutes(3), requestConfig.getConnectionRequestTimeout()); return requestConfigBuilder; } }); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java index e8b7742044f67..bf2c19b8127a1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java @@ -11,10 +11,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -108,7 +109,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression, 
boolean chunkedEnabled) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .setChunkedEnabled(chunkedEnabled) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java index 8c4d993517fee..fdcb65ff101c9 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java @@ -35,10 +35,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -126,7 +127,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .build(); } @@ -184,7 +185,7 @@ public void testCompressingClientSync() throws Exception { public void testCompressingClientAsync() throws Exception { InetSocketAddress address = httpServer.getAddress(); - RestClient restClient = RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + RestClient restClient = RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(true) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java index 277446191a36e..8c62533072c70 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -56,6 +57,7 @@ import static org.opensearch.client.RestClientTestUtil.getAllStatusCodes; import static org.opensearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.opensearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -63,7 +65,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link 
org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. */ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { @@ -299,7 +301,7 @@ public void testNodeSelector() throws Exception { } catch (ConnectException e) { // Windows isn't consistent here. Sometimes the message is even null! if (false == System.getProperty("os.name").startsWith("Windows")) { - assertEquals("Connection refused", e.getMessage()); + assertThat(e.getMessage(), containsString("Connection refused")); } } } else { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java index d88d4f4afd9b1..62574e5ed6d5a 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java @@ -33,9 +33,10 @@ package org.opensearch.client; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; + +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import java.io.IOException; diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java index 0500d282a506d..beee1c5ca21a0 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java @@ -36,30 +36,34 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import 
org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; import org.junit.After; import org.junit.Before; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -86,7 +90,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against a real http server, one single host. */ public class RestClientSingleHostIntegTests extends RestClientTestCase { @@ -147,7 +151,7 @@ private static class ResponseHandler implements HttpHandler { public void handle(HttpExchange httpExchange) throws IOException { // copy request body to response body so we can verify it was sent StringBuilder body = new StringBuilder(); - try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), StandardCharsets.UTF_8)) { char[] buffer = new char[256]; int read; while ((read = reader.read(buffer)) != -1) { @@ -164,7 +168,7 @@ public void handle(HttpExchange httpExchange) throws IOException { httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? 
-1 : body.length()); if (body.length() > 0) { try (OutputStream out = httpExchange.getResponseBody()) { - out.write(body.toString().getBytes(Consts.UTF_8)); + out.write(body.toString().getBytes(StandardCharsets.UTF_8)); } } httpExchange.close(); @@ -172,18 +176,20 @@ public void handle(HttpExchange httpExchange) throws IOException { } private RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) { - // provide the username/password for every request - final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "pass")); - - final RestClientBuilder restClientBuilder = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) - ).setDefaultHeaders(defaultHeaders); + final HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + final RestClientBuilder restClientBuilder = RestClient.builder(httpHost).setDefaultHeaders(defaultHeaders); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix(pathPrefix); } if (useAuth) { + // provide the username/password for every request + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials( + new AuthScope(httpHost, null, "Basic"), + new UsernamePasswordCredentials("user", "pass".toCharArray()) + ); + restClientBuilder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) { @@ -191,7 +197,7 @@ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder h // disable preemptive auth by ignoring any authcache httpClientBuilder.disableAuthCaching(); // don't use the "persistent credentials strategy" - httpClientBuilder.setTargetAuthenticationStrategy(new TargetAuthenticationStrategy()); + httpClientBuilder.setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE); } return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); @@ -220,7 +226,7 @@ public void testManyAsyncRequests() throws Exception { final List exceptions = new CopyOnWriteArrayList<>(); for (int i = 0; i < iters; i++) { Request request = new Request("PUT", "/200"); - request.setEntity(new NStringEntity("{}", ContentType.APPLICATION_JSON)); + request.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)); restClient.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { @@ -271,7 +277,7 @@ public void onFailure(Exception exception) { /** * This test verifies some assumptions that we rely upon around the way the async http client works when reusing the same request - * throughout multiple retries, and the use of the {@link HttpRequestBase#abort()} method. + * throughout multiple retries, and the use of the {@link HttpUriRequestBase#abort()} method. * In fact the low-level REST client reuses the same request instance throughout multiple retries, and relies on the http client * to set the future ref to the request properly so that when abort is called, the proper future gets cancelled. 
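* <p>
* With the HttpClient 5 async API that wiring is explicit; in sketch form, the pattern exercised
* below is:
* <pre>{@code
* Future<ClassicHttpResponse> future = client.execute(producer, consumer, null);
* httpGet.setDependency((Cancellable) future);
* httpGet.abort(); // cancels the in-flight future
* }</pre>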
*/ @@ -279,7 +285,10 @@ public void testRequestResetAndAbort() throws Exception { try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) { client.start(); HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); - HttpGet httpGet = new HttpGet(pathPrefix + "/200"); + HttpUriRequestBase httpGet = new HttpUriRequestBase( + "GET", + new URIBuilder().setHttpHost(httpHost).setPath(pathPrefix + "/200").build() + ); // calling abort before the request is sent is a no-op httpGet.abort(); @@ -288,8 +297,11 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); + + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); - Future future = client.execute(httpHost, httpGet, null); + try { future.get(); fail("expected cancellation exception"); @@ -300,8 +312,9 @@ public void testRequestResetAndAbort() throws Exception { } { httpGet.reset(); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); assertTrue(httpGet.isAborted()); try { @@ -315,9 +328,9 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); - assertEquals(200, future.get().getStatusLine().getStatusCode()); + assertEquals(200, future.get().getCode()); assertFalse(future.isCancelled()); } } @@ -325,7 +338,7 @@ public void testRequestResetAndAbort() throws Exception { /** * End to end test for headers. We test it explicitly against a real http client as there are different ways - * to set/add headers to the {@link org.apache.http.client.HttpClient}. + * to set/add headers to the {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever headers it received. */ public void testHeaders() throws Exception { @@ -365,7 +378,7 @@ public void testHeaders() throws Exception { /** * End to end test for delete with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. */ public void testDeleteWithBody() throws Exception { @@ -374,7 +387,7 @@ public void testDeleteWithBody() throws Exception { /** * End to end test for get with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. 
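testRequestResetAndAbort() above encodes the one behavioural trap in this migration: with the 5.x async client the request object no longer learns about its in-flight future by itself, so abort() can only cancel the future after the two are linked via setDependency(). A condensed sketch of that pattern, reusing the producer/consumer helpers from the test (endpoint, path, and buffer size are illustrative):

    import java.util.concurrent.Future;
    import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase;
    import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
    import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
    import org.apache.hc.core5.concurrent.Cancellable;
    import org.apache.hc.core5.http.ClassicHttpResponse;
    import org.apache.hc.core5.http.HttpHost;
    import org.apache.hc.core5.net.URIBuilder;
    import org.opensearch.client.http.HttpUriRequestProducer;
    import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer;

    class AbortSketch {
        static void run() throws Exception {
            try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) {
                client.start();
                HttpHost host = new HttpHost("localhost", 9200); // illustrative endpoint
                HttpUriRequestBase get = new HttpUriRequestBase(
                    "GET",
                    new URIBuilder().setHttpHost(host).setPath("/200").build()
                );
                Future<ClassicHttpResponse> future = client.execute(
                    HttpUriRequestProducer.create(get, host),
                    new HeapBufferedAsyncResponseConsumer(1024),
                    null
                );
                // Without this line abort() would mark the request aborted but leave the
                // future running; with it, abort() cancels the future as well.
                get.setDependency((Cancellable) future);
                get.abort();
            }
        }
    }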
*/ public void testGetWithBody() throws Exception { @@ -410,7 +423,7 @@ public void testEncodeParams() throws Exception { Request request = new Request("PUT", "/200"); request.addParameter("routing", "foo bar"); Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); - assertEquals(pathPrefix + "/200?routing=foo+bar", response.getRequestLine().getUri()); + assertEquals(pathPrefix + "/200?routing=foo%20bar", response.getRequestLine().getUri()); } { Request request = new Request("PUT", "/200"); @@ -540,4 +553,13 @@ private Response bodyTest(RestClient restClient, String method, int statusCode, return esResponse; } + + private AsyncResponseConsumer getResponseConsumer() { + return new HeapBufferedAsyncResponseConsumer(1024); + } + + private HttpUriRequestProducer getRequestProducer(HttpUriRequestBase request, HttpHost host) { + return HttpUriRequestProducer.create(request, host); + + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java index e5ce5eb91ad5a..f46a91aa910f8 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java @@ -34,38 +34,42 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.concurrent.FutureCallback; +import 
org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.TimeValue; import org.junit.After; import org.junit.Before; -import org.mockito.ArgumentCaptor; -import org.mockito.stubbing.Answer; +import org.opensearch.client.http.HttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; import java.io.IOException; @@ -85,6 +89,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; import static java.util.Collections.singletonList; import static org.opensearch.client.RestClientTestUtil.getAllErrorStatusCodes; @@ -100,12 +105,6 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.nullable; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; /** * Tests for basic functionality of {@link RestClient} against one single host: tests http requests being sent, headers, @@ -122,10 +121,17 @@ public class RestClientSingleHostTests extends RestClientTestCase { private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; private boolean strictDeprecationMode; + private LongAdder requests; + private AtomicReference requestProducerCapture; @Before public void createRestClient() { - httpClient = mockHttpClient(exec); + requests = new LongAdder(); + requestProducerCapture = new AtomicReference<>(); + httpClient = mockHttpClient(exec, (target, requestProducer, responseConsumer, pushHandlerFactory, context, callback) -> { + requests.increment(); + requestProducerCapture.set(requestProducer); + }); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); @@ -143,41 +149,78 @@ public void createRestClient() { ); } + interface CloseableHttpAsyncClientListener { + void onExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ); + } + @SuppressWarnings("unchecked") - static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) { - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - 
httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ) - ).thenAnswer((Answer>) invocationOnMock -> { - final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - // Call the callback asynchronous to better simulate how async http client works - return exec.submit(() -> { - if (futureCallback != null) { - try { - HttpResponse httpResponse = responseOrException(requestProducer); - futureCallback.completed(httpResponse); - } catch (Exception e) { - futureCallback.failed(e); + static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec, final CloseableHttpAsyncClientListener... listeners) { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + Arrays.stream(listeners) + .forEach(l -> l.onExecute(target, requestProducer, responseConsumer, pushHandlerFactory, context, callback)); + // Call the callback asynchronous to better simulate how async http client works + return exec.submit(() -> { + if (callback != null) { + try { + ClassicHttpResponse httpResponse = responseOrException(requestProducer); + callback.completed((T) httpResponse); + } catch (Exception e) { + callback.failed(e); + } + return null; } - return null; - } - return responseOrException(requestProducer); - }); - }); + return (T) responseOrException(requestProducer); + }); + } + + @Override + public void awaitShutdown(TimeValue waitTime) throws InterruptedException {} + }; + return httpClient; } - private static HttpResponse responseOrException(HttpAsyncRequestProducer requestProducer) throws Exception { - final HttpUriRequest request = (HttpUriRequest) requestProducer.generateRequest(); - final HttpHost httpHost = requestProducer.getTarget(); + private static ClassicHttpResponse responseOrException(AsyncRequestProducer requestProducer) throws Exception { + final ClassicHttpRequest request = getRequest(requestProducer); + final HttpHost httpHost = new HttpHost(request.getAuthority()); // return the desired status code or exception depending on the path - switch (request.getURI().getPath()) { + switch (request.getRequestUri()) { case "/soe": throw new SocketTimeoutException(httpHost.toString()); case "/coe": @@ -193,20 +236,17 @@ private static HttpResponse responseOrException(HttpAsyncRequestProducer request case "/runtime": throw new RuntimeException(); default: - int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); - StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + int statusCode = Integer.parseInt(request.getRequestUri().substring(1)); - final HttpResponse httpResponse = new BasicHttpResponse(statusLine); + final ClassicHttpResponse httpResponse = new 
BasicClassicHttpResponse(statusCode, ""); // return the same body that was sent - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity(); - if (entity != null) { - assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); - httpResponse.setEntity(entity); - } + HttpEntity entity = request.getEntity(); + if (entity != null) { + assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); + httpResponse.setEntity(entity); } // return the same headers that were sent - httpResponse.setHeaders(request.getAllHeaders()); + httpResponse.setHeaders(request.getHeaders()); return httpResponse; } } @@ -224,26 +264,20 @@ public void shutdownExec() { */ @SuppressWarnings("unchecked") public void testInternalHttpRequest() throws Exception { - ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class); int times = 0; for (String httpMethod : getHttpMethods()) { - HttpUriRequest expectedRequest = performRandomRequest(httpMethod); - verify(httpClient, times(++times)).execute( - requestArgumentCaptor.capture(), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ); - HttpUriRequest actualRequest = (HttpUriRequest) requestArgumentCaptor.getValue().generateRequest(); - assertEquals(expectedRequest.getURI(), actualRequest.getURI()); - assertEquals(expectedRequest.getClass(), actualRequest.getClass()); - assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders()); - if (expectedRequest instanceof HttpEntityEnclosingRequest) { - HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity(); - if (expectedEntity != null) { - HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity(); - assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); - } + ClassicHttpRequest expectedRequest = performRandomRequest(httpMethod); + assertThat(requests.intValue(), equalTo(++times)); + + ClassicHttpRequest actualRequest = getRequest(requestProducerCapture.get()); + assertEquals(expectedRequest.getRequestUri(), actualRequest.getRequestUri()); + assertEquals(expectedRequest.getMethod(), actualRequest.getMethod()); + assertArrayEquals(expectedRequest.getHeaders(), actualRequest.getHeaders()); + + HttpEntity expectedEntity = expectedRequest.getEntity(); + if (expectedEntity != null) { + HttpEntity actualEntity = actualRequest.getEntity(); + assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); } } } @@ -414,14 +448,14 @@ public void testBody() throws Exception { } } } - for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + for (String method : Arrays.asList("TRACE")) { Request request = new Request(method, "/" + randomStatusCode(getRandom())); request.setEntity(entity); try { performRequestSyncOrAsync(restClient, request); fail("request should have failed"); - } catch (UnsupportedOperationException e) { - assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo(method + " requests may not include an entity.")); } } } @@ -587,10 +621,10 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { HttpUriRequest expectedRequest; switch (method) { case "DELETE": - expectedRequest = new HttpDeleteWithEntity(uri); + 
expectedRequest = new HttpDelete(uri); break; case "GET": - expectedRequest = new HttpGetWithEntity(uri); + expectedRequest = new HttpGet(uri); break; case "HEAD": expectedRequest = new HttpHead(uri); @@ -614,14 +648,14 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { throw new UnsupportedOperationException("method not supported: " + method); } - if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) { + if (getRandom().nextBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); - ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity); + expectedRequest.setEntity(entity); request.setEntity(entity); } final Set uniqueNames = new HashSet<>(); - if (randomBoolean()) { + if (randomBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { @@ -698,4 +732,9 @@ private static void assertExceptionStackContainsCallingMethod(Throwable t) { t.printStackTrace(new PrintWriter(stack)); fail("didn't find the calling method (looks like " + myMethod + ") in:\n" + stack); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) throws NoSuchFieldException, IllegalAccessException { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index ca761dcb6b9b6..dd51da3a30d8c 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.AuthCache; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.opensearch.client.RestClient.NodeTuple; import java.io.IOException; @@ -410,10 +411,10 @@ public void testIsRunning() { CloseableHttpAsyncClient client = mock(CloseableHttpAsyncClient.class); RestClient restClient = new RestClient(client, new Header[] {}, nodes, null, null, null, false, false); - when(client.isRunning()).thenReturn(true); + when(client.getStatus()).thenReturn(IOReactorStatus.ACTIVE); assertTrue(restClient.isRunning()); - when(client.isRunning()).thenReturn(false); + when(client.getStatus()).thenReturn(IOReactorStatus.INACTIVE); assertFalse(restClient.isRunning()); } diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index 066419844f048..f4c1c98dd4ce9 100644 --- 
a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -32,23 +32,28 @@ package org.opensearch.client.documentation; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.RequestLine; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.ssl.SSLContextBuilder; -import org.apache.http.ssl.SSLContexts; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.ssl.SSLContextBuilder; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.Cancellable; import org.opensearch.client.HttpAsyncResponseConsumerFactory; import org.opensearch.client.Node; @@ -109,12 +114,12 @@ public class RestClientDocumentation { // end::rest-client-options-singleton @SuppressWarnings("unused") - public void usage() throws IOException, InterruptedException { + public void usage() throws IOException, InterruptedException, ParseException { //tag::rest-client-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http")).build(); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201)).build(); //end::rest-client-init //tag::rest-client-close @@ -124,7 +129,7 @@ public void usage() throws IOException, InterruptedException { { //tag::rest-client-init-default-headers RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("header", "value")}; builder.setDefaultHeaders(defaultHeaders); // <1> //end::rest-client-init-default-headers @@ -132,14 +137,14 @@ public void usage() throws IOException, InterruptedException { { 
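Every documentation hunk in this file repeats one mechanical change that is easy to get backwards: the HttpCore 5 HttpHost constructor moves the scheme to the first argument. A short reminder, with the values used throughout the surrounding examples:

    import org.apache.hc.core5.http.HttpHost;

    class HttpHostOrderSketch {
        // HttpComponents 4.x: new HttpHost("localhost", 9200, "http") -- host, port, scheme
        // HttpComponents 5.x: the scheme comes first:
        static final HttpHost HOST = new HttpHost("http", "localhost", 9200); // scheme, host, port
    }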
//tag::rest-client-init-node-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); // <1> //end::rest-client-init-node-selector } { //tag::rest-client-init-allocation-aware-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(new NodeSelector() { // <1> @Override public void select(Iterable nodes) { @@ -173,7 +178,7 @@ public void select(Iterable nodes) { { //tag::rest-client-init-failure-listener RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setFailureListener(new RestClient.FailureListener() { @Override public void onFailure(Node node) { @@ -185,13 +190,13 @@ public void onFailure(Node node) { { //tag::rest-client-init-request-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setRequestConfigCallback( new RestClientBuilder.RequestConfigCallback() { @Override public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { - return requestConfigBuilder.setSocketTimeout(10000); // <1> + return requestConfigBuilder.setResponseTimeout(Timeout.ofMilliseconds(10000)); // <1> } }); //end::rest-client-init-request-config-callback @@ -199,13 +204,13 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-init-client-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { return httpClientBuilder.setProxy( - new HttpHost("proxy", 9000, "http")); // <1> + new HttpHost("http", "proxy", 9000)); // <1> } }); //end::rest-client-init-client-config-callback @@ -244,7 +249,7 @@ public void onFailure(Exception exception) { request.addParameter("pretty", "true"); //end::rest-client-parameters //tag::rest-client-body - request.setEntity(new NStringEntity( + request.setEntity(new StringEntity( "{\"json\":\"text\"}", ContentType.APPLICATION_JSON)); //end::rest-client-body @@ -334,8 +339,8 @@ public void commonConfiguration() throws Exception { public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { return requestConfigBuilder - .setConnectTimeout(5000) - .setSocketTimeout(60000); + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)); } }); //end::rest-client-config-timeouts @@ -343,8 +348,8 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-config-request-options-timeouts RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(5000) - .setSocketTimeout(60000) + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)) .build(); RequestOptions options = RequestOptions.DEFAULT.toBuilder() .setRequestConfig(requestConfig) @@ -359,7 +364,7 @@ public RequestConfig.Builder customizeRequestConfig( @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return 
httpClientBuilder.setDefaultIOReactorConfig( + return httpClientBuilder.setIOReactorConfig( IOReactorConfig.custom() .setIoThreadCount(1) .build()); @@ -369,10 +374,9 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-basic-auth - final CredentialsProvider credentialsProvider = - new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -388,10 +392,10 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-disable-preemptive-auth - final CredentialsProvider credentialsProvider = + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -418,12 +422,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(truststore, null); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-encrypted-communication @@ -444,12 +456,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(trustStore, null); final SSLContext sslContext = sslContextBuilder.build(); RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-trust-ca-pem @@ -473,12 +493,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadKeyMaterial(keyStore, keyStorePass.toCharArray()); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new 
HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-mutual-tls-authentication @@ -486,7 +514,7 @@ public HttpAsyncClientBuilder customizeHttpClient( { //tag::rest-client-auth-bearer-token RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "Bearer u6iuAxZ0RG1Kcm5jVFI4eU4tZU9aVFEwT2F3")}; @@ -502,7 +530,7 @@ public HttpAsyncClientBuilder customizeHttpClient( (apiKeyId + ":" + apiKeySecret) .getBytes(StandardCharsets.UTF_8)); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "ApiKey " + apiKeyAuth)}; diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index b7cb0d87c02d9..eb3306cf2cea2 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -38,8 +38,8 @@ archivesBaseName = 'opensearch-rest-client-sniffer' dependencies { api project(":client:rest") - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" @@ -84,6 +84,7 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ 
+92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index c1a0fcf9a8acf..e6696c1fc4039 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -37,8 +37,8 @@ import com.fasterxml.jackson.core.JsonToken; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.Request; @@ -192,12 +192,12 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th publishAddressAsURI = URI.create(scheme + "://" + address); host = publishAddressAsURI.getHost(); } - publishedHost = new HttpHost(host, publishAddressAsURI.getPort(), publishAddressAsURI.getScheme()); + publishedHost = new HttpHost(publishAddressAsURI.getScheme(), host, publishAddressAsURI.getPort()); } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { while (parser.nextToken() != JsonToken.END_ARRAY) { URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); boundHosts.add( - new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme()) + new HttpHost(boundAddressAsURI.getScheme(), boundAddressAsURI.getHost(), boundAddressAsURI.getPort()) ); } } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java index cbf349e534deb..9b5e89fbeb038 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import java.util.Collections; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java index 58b60ac13dee8..fd38eceee6224 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java @@ -33,10 +33,11 @@ package org.opensearch.client.sniff; import com.fasterxml.jackson.core.JsonFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.RestClientTestCase; diff --git 
a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 1d06e9353726d..b678fb050e8f8 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -40,14 +40,13 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; import org.opensearch.client.Node; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import org.junit.Before; @@ -56,6 +55,7 @@ import java.io.StringWriter; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -181,7 +181,7 @@ public void handle(HttpExchange httpExchange) throws IOException { String nodesInfoBody = sniffResponse.nodesInfoBody; httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); try (OutputStream out = httpExchange.getResponseBody()) { - out.write(nodesInfoBody.getBytes(Consts.UTF_8)); + out.write(nodesInfoBody.getBytes(StandardCharsets.UTF_8)); return; } } @@ -210,14 +210,14 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); String host = "host" + i; int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + HttpHost publishHost = new HttpHost(scheme.toString(), host, port); Set boundHosts = new HashSet<>(); boundHosts.add(publishHost); if (randomBoolean()) { int bound = between(1, 5); for (int b = 0; b < bound; b++) { - boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + boundHosts.add(new HttpHost(scheme.toString(), host + b, port)); } } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java index e4d1058282f5c..faab6babcaca6 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java index 25a3162e238ed..24ee540aa6364 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java @@ -33,7 +33,8 @@ package org.opensearch.client.sniff; import 
com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java index 304243e73c078..36923281dde6b 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java @@ -32,12 +32,12 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; import org.opensearch.client.sniff.Sniffer.DefaultScheduler; import org.opensearch.client.sniff.Sniffer.Scheduler; +import org.apache.hc.core5.http.HttpHost; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 3b612aab80851..8f3e446d8aefb 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.sniff.OpenSearchNodesSniffer; @@ -69,7 +69,7 @@ public void usage() throws IOException { { //tag::sniffer-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient).build(); //end::sniffer-init @@ -82,7 +82,7 @@ public void usage() throws IOException { { //tag::sniffer-interval RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient) .setSniffIntervalMillis(60000).build(); @@ -105,7 +105,7 @@ public void usage() throws IOException { { //tag::sniffer-https RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -118,7 +118,7 @@ public void usage() throws IOException { { //tag::sniff-request-timeout RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -131,7 +131,7 @@ public void usage() throws IOException { { //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new NodesSniffer() { @Override diff --git a/client/test/build.gradle b/client/test/build.gradle index 07d874cf01ea7..13e9bd6b9e34a 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -35,7 +35,7 @@ sourceCompatibility = JavaVersion.VERSION_11 group = "${group}.client.test" 
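The SnifferDocumentation hunks above apply the same HttpHost swap inside every sniffer example; condensed, the migrated setup looks roughly like this (sniff interval taken from the docs, endpoint illustrative):

    import org.apache.hc.core5.http.HttpHost;
    import org.opensearch.client.RestClient;
    import org.opensearch.client.sniff.Sniffer;

    class SnifferSketch {
        static void run() throws Exception {
            RestClient restClient = RestClient.builder(
                new HttpHost("http", "localhost", 9200)).build();
            // Re-fetch the node list every 60 seconds.
            Sniffer sniffer = Sniffer.builder(restClient)
                .setSniffIntervalMillis(60000)
                .build();
            // ... use the client ...
            sniffer.close();     // close the sniffer first,
            restClient.close();  // then the client it watches
        }
    }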
dependencies { - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java index 2b3e867929e27..b4eacdbf88827 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java @@ -43,7 +43,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; import java.util.ArrayList; import java.util.HashMap; diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java index aeba9bde4bff4..6a01ed30e0c63 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java @@ -35,8 +35,9 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.http.Header; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; import java.util.ArrayList; import java.util.Arrays; diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java index 37ffe32d19509..07576dacffb03 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java @@ -32,13 +32,14 @@ package org.opensearch.test.rest; -import org.apache.http.util.EntityUtils; import org.opensearch.action.ActionFuture; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.ResponseListener; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.After; import org.junit.Before; @@ -145,6 +146,8 @@ public void onSuccess(Response response) { future.onResponse(EntityUtils.toString(response.getEntity())); } catch (IOException e) { future.onFailure(e); + } catch (ParseException e) { + future.onFailure(e); } } diff --git a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java index 2584a9b41f14d..10ee9393b343f 100644 --- a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java +++ 
b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java @@ -34,7 +34,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.common.xcontent.XContentHelper; @@ -73,7 +76,7 @@ public void testCreateIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testAliases() throws IOException { + public void testAliases() throws IOException, ParseException { assumeFalse("In this test, .opensearch_dashboards is the alias name", ".opensearch_dashboards".equals(indexName)); Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); @@ -96,7 +99,7 @@ public void testBulkToOpenSearchDashboardsIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testRefresh() throws IOException { + public void testRefresh() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); Response response = client().performRequest(request); @@ -114,7 +117,7 @@ public void testRefresh() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testGetFromOpenSearchDashboardsIndex() throws IOException { + public void testGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); request.addParameter("refresh", "true"); @@ -130,7 +133,7 @@ public void testGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { + public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -163,7 +166,7 @@ public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("tag")); } - public void testSearchFromOpenSearchDashboardsIndex() throws IOException { + public void testSearchFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -241,7 +244,7 @@ public void testUpdateIndexSettings() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testGetIndex() throws IOException { + public void testGetIndex() throws IOException, ParseException { Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); @@ -278,7 +281,7 @@ public void testIndexingAndUpdatingDocs() throws 
IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testScrollingDocs() throws IOException { + public void testScrollingDocs() throws IOException, OpenSearchParseException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 43adffc6f7671..bb1a9d190313f 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -33,7 +33,8 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index 34fcd245289be..f8e9018bce6df 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -32,10 +32,10 @@ package org.opensearch.index.reindex; -import org.apache.http.conn.ssl.DefaultHostnameVerifier; -import org.apache.http.conn.ssl.NoopHostnameVerifier; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; -import org.opensearch.common.Strings; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier; +import org.apache.hc.client5.http.ssl.NoopHostnameVerifier; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; @@ -161,16 +161,24 @@ private void reload() { } /** - * Encapsulate the loaded SSL configuration as a HTTP-client {@link SSLIOSessionStrategy}. + * Encapsulate the loaded SSL configuration as a HTTP-client {@link TlsStrategy}. * The returned strategy is immutable, but successive calls will return different objects that may have different * configurations if the underlying key/certificate files are modified. */ - SSLIOSessionStrategy getStrategy() { + TlsStrategy getStrategy() { final HostnameVerifier hostnameVerifier = configuration.getVerificationMode().isHostnameVerificationEnabled() ? 
new DefaultHostnameVerifier() : new NoopHostnameVerifier(); - final String[] protocols = configuration.getSupportedProtocols().toArray(Strings.EMPTY_ARRAY); - final String[] cipherSuites = configuration.getCipherSuites().toArray(Strings.EMPTY_ARRAY); - return new SSLIOSessionStrategy(context, protocols, cipherSuites, hostnameVerifier); + + final String[] protocols = configuration.getSupportedProtocols().toArray(new String[0]); + final String[] cipherSuites = configuration.getCipherSuites().toArray(new String[0]); + + return ClientTlsStrategyBuilder.create() + .setSslContext(context) + .setHostnameVerifier(hostnameVerifier) + .setCiphers(cipherSuites) + .setTlsVersions(protocols) + .build(); + } } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index 8ade055d10f60..aa9accbd90e21 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -33,15 +33,18 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequestInterceptor; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequestInterceptor; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.util.Timeout; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; @@ -202,21 +205,23 @@ static RestClient buildRestClient( for (Map.Entry header : remoteInfo.getHeaders().entrySet()) { clientHeaders[i++] = new BasicHeader(header.getKey(), header.getValue()); } - final RestClientBuilder builder = RestClient.builder( - new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme()) - ).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { - c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis())); - c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis())); + final HttpHost httpHost = new HttpHost(remoteInfo.getScheme(), remoteInfo.getHost(), remoteInfo.getPort()); + final RestClientBuilder builder = RestClient.builder(httpHost).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { + c.setConnectTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getConnectTimeout().millis()))); + c.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getSocketTimeout().millis()))); return c; }).setHttpClientConfigCallback(c -> { // Enable basic auth if it is configured if (remoteInfo.getUsername() != null) { - UsernamePasswordCredentials creds = new 
diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java
index 8467fbdeacd0e..873bd7c3b48cb 100644
--- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java
+++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java
@@ -32,8 +32,8 @@
 
 package org.opensearch.index.reindex.remote;
 
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.entity.NStringEntity;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.io.entity.StringEntity;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.action.search.SearchRequest;
@@ -240,7 +240,7 @@ static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion)
 
         if (remoteVersion.before(Version.fromId(2000099))) {
             // Versions before 2.0.0 extract the plain scroll_id from the body
-            request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN));
+            request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN));
             return request;
         }
 
@@ -258,7 +258,7 @@ static Request clearScroll(String scroll, Version remoteVersion) {
 
         if (remoteVersion.before(Version.fromId(2000099))) {
             // Versions before 2.0.0 extract the plain scroll_id from the body
-            request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN));
+            request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN));
             return request;
         }
         try (XContentBuilder entity = JsonXContent.contentBuilder()) {
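NStringEntity disappears in HttpClient 5; the plain StringEntity now serves both the classic and async transports. In isolation (the endpoint string is illustrative only):

    import org.apache.hc.core5.http.ContentType;
    import org.apache.hc.core5.http.io.entity.StringEntity;
    import org.opensearch.client.Request;

    class EntitySketch {
        static Request scrollRequest(String scrollId) {
            Request request = new Request("POST", "/_search/scroll");
            request.setEntity(new StringEntity(scrollId, ContentType.TEXT_PLAIN));
            return request;
        }
    }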
diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java
index be691243ecf84..3a943450a1a89 100644
--- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java
+++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java
@@ -32,10 +32,11 @@
 
 package org.opensearch.index.reindex.remote;
 
-import org.apache.http.ContentTooLongException;
-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.ContentTooLongException;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
@@ -199,7 +200,7 @@ public void onSuccess(org.opensearch.client.Response response) {
                 InputStream content = responseEntity.getContent();
                 XContentType xContentType = null;
                 if (responseEntity.getContentType() != null) {
-                    final String mimeType = ContentType.parse(responseEntity.getContentType().getValue()).getMimeType();
+                    final String mimeType = ContentType.parse(responseEntity.getContentType()).getMimeType();
                     xContentType = XContentType.fromMediaType(mimeType);
                 }
                 if (xContentType == null) {
@@ -284,7 +285,11 @@ private static String bodyMessage(@Nullable HttpEntity entity) throws IOExceptio
         if (entity == null) {
             return "No error body.";
         } else {
-            return "body=" + EntityUtils.toString(entity);
+            try {
+                return "body=" + EntityUtils.toString(entity);
+            } catch (final ParseException ex) {
+                throw new IOException(ex);
+            }
         }
     }
 }
diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java
index 034981c969b4b..0646c9b5d8705 100644
--- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java
+++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java
@@ -6,7 +6,8 @@
 package org.opensearch.index.reindex.spi;
 
 import java.util.Optional;
-import org.apache.http.HttpRequestInterceptor;
+
+import org.apache.hc.core5.http.HttpRequestInterceptor;
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.index.reindex.ReindexRequest;
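Two HttpCore 5 changes drive the RemoteScrollableHitSource hunk: HttpEntity.getContentType() now returns the raw header String (no more Header.getValue()), and EntityUtils.toString() declares the checked ParseException. The wrapping pattern, extracted as a self-contained sketch:

    import java.io.IOException;
    import org.apache.hc.core5.http.HttpEntity;
    import org.apache.hc.core5.http.ParseException;
    import org.apache.hc.core5.http.io.entity.EntityUtils;

    class BodySketch {
        // Callers that may only propagate IOException rewrap ParseException, as the patch does.
        static String body(HttpEntity entity) throws IOException {
            if (entity == null) {
                return "No error body.";
            }
            try {
                return "body=" + EntityUtils.toString(entity);
            } catch (ParseException ex) {
                throw new IOException(ex);
            }
        }
    }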
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java
index c349bc54bcbd9..e7af54a0563d3 100644
--- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java
+++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java
@@ -32,8 +32,8 @@
 
 package org.opensearch.index.reindex.remote;
 
-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
 import org.opensearch.Version;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.client.Request;
@@ -245,7 +245,7 @@ public void testInitialSearchEntity() throws IOException {
         searchRequest.source(new SearchSourceBuilder());
         String query = "{\"match_all\":{}}";
         HttpEntity entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity();
-        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());
+        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType());
         if (remoteVersion.onOrAfter(Version.fromId(1000099))) {
             assertEquals(
                 "{\"query\":" + query + ",\"_source\":true}",
@@ -261,7 +261,7 @@ public void testInitialSearchEntity() throws IOException {
         // Source filtering is included if set up
         searchRequest.source().fetchSource(new String[] { "in1", "in2" }, new String[] { "out" });
         entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity();
-        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());
+        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType());
         assertEquals(
             "{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}",
             Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))
@@ -287,7 +287,7 @@ public void testScrollParams() {
     public void testScrollEntity() throws IOException {
         String scroll = randomAlphaOfLength(30);
         HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity();
-        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());
+        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType());
         assertThat(
             Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)),
             containsString("\"" + scroll + "\"")
@@ -295,14 +295,14 @@ public void testScrollEntity() throws IOException {
 
         // Test with version < 2.0.0
         entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromId(1070499)).getEntity();
-        assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue());
+        assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType());
         assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)));
     }
 
     public void testClearScroll() throws IOException {
         String scroll = randomAlphaOfLength(30);
         Request request = clearScroll(scroll, Version.fromString("5.0.0"));
-        assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue());
+        assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType());
         assertThat(
             Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)),
             containsString("\"" + scroll + "\"")
@@ -311,7 +311,7 @@ public void testClearScroll() throws IOException {
 
         // Test with version < 2.0.0
         request = clearScroll(scroll, Version.fromId(1070499));
-        assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType().getValue());
+        assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType());
         assertEquals(scroll, Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)));
         assertThat(request.getParameters().keySet(), empty());
     }
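The same getContentType() change is what reshapes all of these assertions: the entity hands back the header value as a String directly. For instance, a content-type check now reads roughly like this sketch:

    import org.apache.hc.core5.http.ContentType;
    import org.apache.hc.core5.http.HttpEntity;

    class ContentTypeSketch {
        // No Header object, no .getValue(): parse the String and compare MIME types.
        static boolean isJson(HttpEntity entity) {
            String contentType = entity.getContentType();
            return contentType != null
                && ContentType.parse(contentType).getMimeType().equals(ContentType.APPLICATION_JSON.getMimeType());
        }
    }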
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
index 337bc67796f8e..c0e2bd14f55bc 100644
--- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
+++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java
@@ -32,31 +32,14 @@
 
 package org.opensearch.index.reindex.remote;
 
-import org.apache.http.ContentTooLongException;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpEntityEnclosingRequest;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpResponse;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.StatusLine;
-import org.apache.http.client.protocol.HttpClientContext;
-import org.apache.http.concurrent.FutureCallback;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
-import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
-import org.apache.http.message.BasicHttpResponse;
-import org.apache.http.message.BasicStatusLine;
-import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
-import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchStatusException;
 import org.opensearch.Version;
 import org.opensearch.action.bulk.BackoffPolicy;
 import org.opensearch.action.search.SearchRequest;
-import org.opensearch.client.HeapBufferedAsyncResponseConsumer;
 import org.opensearch.client.RestClient;
+import org.opensearch.client.http.HttpUriRequestProducer;
+import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer;
 import org.opensearch.common.ParsingException;
 import org.opensearch.common.bytes.BytesArray;
 import org.opensearch.common.io.FileSystemUtils;
@@ -74,13 +57,32 @@
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
+import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
+import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
+import org.apache.hc.core5.concurrent.FutureCallback;
+import org.apache.hc.core5.function.Supplier;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+import org.apache.hc.core5.http.ClassicHttpResponse;
+import org.apache.hc.core5.http.ContentTooLongException;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.hc.core5.http.io.entity.InputStreamEntity;
+import org.apache.hc.core5.http.io.entity.StringEntity;
+import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
+import org.apache.hc.core5.http.nio.AsyncPushConsumer;
+import org.apache.hc.core5.http.nio.AsyncRequestProducer;
+import org.apache.hc.core5.http.nio.AsyncResponseConsumer;
+import org.apache.hc.core5.http.nio.HandlerFactory;
+import org.apache.hc.core5.http.protocol.HttpContext;
+import org.apache.hc.core5.io.CloseMode;
+import org.apache.hc.core5.reactor.IOReactorStatus;
 import org.junit.After;
 import org.junit.Before;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.util.Queue;
@@ -97,7 +99,6 @@
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
-import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
"unchecked", "rawtypes" }) public void testTooLargeResponse() throws Exception { ContentTooLongException tooLong = new ContentTooLongException("too long!"); - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).then(new Answer>() { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { - HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1]; - FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3]; - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit()); + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + assertEquals( + new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), + ((HeapBufferedAsyncResponseConsumer) responseConsumer).getBufferLimit() + ); callback.failed(tooLong); return null; } - }); + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + }; + RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient); Throwable e = expectThrows(RuntimeException.class, source::start); @@ -539,46 +565,68 @@ private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteV } } - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).thenAnswer(new Answer>() { - + final CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { int responseCount = 0; @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { - // Throw away the current thread context to simulate running async httpclient's thread pool - threadPool.getThreadContext().stashContext(); - HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestProducer.generateRequest(); - URL resource = resources[responseCount]; - String path = paths[responseCount++]; - ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - if (path.startsWith("fail:")) { - String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); - if (path.equals("fail:rejection.json")) { - StatusLine statusLine = new BasicStatusLine(protocolVersion, RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); - futureCallback.completed(httpResponse); + 
@@ -539,46 +565,68 @@ private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteV
             }
         }
 
-        CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
-        when(
-            httpClient.execute(
-                any(HttpAsyncRequestProducer.class),
-                any(HttpAsyncResponseConsumer.class),
-                any(HttpClientContext.class),
-                any(FutureCallback.class)
-            )
-        ).thenAnswer(new Answer<Future<HttpResponse>>() {
-
+        final CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() {
             int responseCount = 0;
 
             @Override
-            public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
-                // Throw away the current thread context to simulate running async httpclient's thread pool
-                threadPool.getThreadContext().stashContext();
-                HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
-                FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3];
-                HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestProducer.generateRequest();
-                URL resource = resources[responseCount];
-                String path = paths[responseCount++];
-                ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1);
-                if (path.startsWith("fail:")) {
-                    String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8));
-                    if (path.equals("fail:rejection.json")) {
-                        StatusLine statusLine = new BasicStatusLine(protocolVersion, RestStatus.TOO_MANY_REQUESTS.getStatus(), "");
-                        BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
-                        futureCallback.completed(httpResponse);
+            public void close(CloseMode closeMode) {}
+
+            @Override
+            public void close() throws IOException {}
+
+            @Override
+            public void start() {}
+
+            @Override
+            public IOReactorStatus getStatus() {
+                return null;
+            }
+
+            @Override
+            public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {}
+
+            @Override
+            public void initiateShutdown() {}
+
+            @Override
+            protected <T> Future<T> doExecute(
+                HttpHost target,
+                AsyncRequestProducer requestProducer,
+                AsyncResponseConsumer<T> responseConsumer,
+                HandlerFactory<AsyncPushConsumer> pushHandlerFactory,
+                HttpContext context,
+                FutureCallback<T> callback
+            ) {
+                try {
+                    // Throw away the current thread context to simulate running async httpclient's thread pool
+                    threadPool.getThreadContext().stashContext();
+                    ClassicHttpRequest request = getRequest(requestProducer);
+                    URL resource = resources[responseCount];
+                    String path = paths[responseCount++];
+                    if (path.startsWith("fail:")) {
+                        String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8));
+                        if (path.equals("fail:rejection.json")) {
+                            ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.TOO_MANY_REQUESTS.getStatus(), "");
+                            callback.completed((T) httpResponse);
+                        } else {
+                            callback.failed(new RuntimeException(body));
+                        }
                     } else {
-                        futureCallback.failed(new RuntimeException(body));
+                        BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "");
+                        httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType));
+                        callback.completed((T) httpResponse);
                     }
-                } else {
-                    StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "");
-                    HttpResponse httpResponse = new BasicHttpResponse(statusLine);
-                    httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType));
-                    futureCallback.completed(httpResponse);
+                    return null;
+                } catch (IOException ex) {
+                    throw new UncheckedIOException(ex);
                 }
-                return null;
             }
-        });
+
+            @Override
+            public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {}
+
+        };
+
         return sourceWithMockedClient(mockRemoteVersion, httpClient);
     }
 
@@ -649,4 +697,9 @@ private T expectListenerFailure(Class expectedExcept
         assertNotNull(exception.get());
         return exception.get();
     }
+
+    private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) {
+        assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class));
+        return ((HttpUriRequestProducer) requestProducer).getRequest();
+    }
 }
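Because HttpClient 5 funnels CloseableHttpAsyncClient.execute(...) through the protected doExecute(...), the old Mockito when(httpClient.execute(...)) stubbing no longer has a convenient seam; the tests above subclass the client instead. The pattern in minimal form, as a stub that fails every request:

    import java.util.concurrent.Future;
    import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
    import org.apache.hc.core5.concurrent.FutureCallback;
    import org.apache.hc.core5.function.Supplier;
    import org.apache.hc.core5.http.HttpHost;
    import org.apache.hc.core5.http.nio.AsyncPushConsumer;
    import org.apache.hc.core5.http.nio.AsyncRequestProducer;
    import org.apache.hc.core5.http.nio.AsyncResponseConsumer;
    import org.apache.hc.core5.http.nio.HandlerFactory;
    import org.apache.hc.core5.http.protocol.HttpContext;
    import org.apache.hc.core5.io.CloseMode;
    import org.apache.hc.core5.reactor.IOReactorStatus;

    class FailingAsyncClient extends CloseableHttpAsyncClient {
        private final Exception failure;

        FailingAsyncClient(Exception failure) {
            this.failure = failure;
        }

        @Override
        protected <T> Future<T> doExecute(
            HttpHost target,
            AsyncRequestProducer requestProducer,
            AsyncResponseConsumer<T> responseConsumer,
            HandlerFactory<AsyncPushConsumer> pushHandlerFactory,
            HttpContext context,
            FutureCallback<T> callback
        ) {
            callback.failed(failure); // complete every call exceptionally
            return null;
        }

        // Lifecycle methods are irrelevant for a test stub.
        @Override public void close() {}
        @Override public void close(CloseMode closeMode) {}
        @Override public void start() {}
        @Override public IOReactorStatus getStatus() { return null; }
        @Override public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) {}
        @Override public void initiateShutdown() {}
        @Override public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {}
    }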
diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java
index 3d0c09fb2288c..cbadcba5ef6f0 100644
--- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java
+++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java
@@ -34,9 +34,6 @@
 
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.entity.NStringEntity;
 import org.opensearch.client.Request;
 import org.opensearch.client.Response;
 import org.opensearch.common.Strings;
@@ -49,6 +46,9 @@
 import org.opensearch.rest.RestStatus;
 import org.opensearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.io.entity.StringEntity;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -144,7 +144,7 @@ private static HttpEntity buildRepositorySettings(final String type, final Setti
             builder.endObject();
         }
         builder.endObject();
-        return new NStringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
+        return new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
         }
     }
 }
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java
index 96e21e0e05ff7..fbac1f1c52e95 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.rest.discovery;
 
-import org.apache.http.HttpHost;
 import org.opensearch.OpenSearchNetty4IntegTestCase;
 import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.opensearch.client.Client;
@@ -49,9 +48,11 @@
 import org.opensearch.http.HttpServerTransport;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.InternalTestCluster;
+import org.apache.hc.core5.http.HttpHost;
 import org.hamcrest.Matchers;
 
 import java.io.IOException;
+import java.net.URISyntaxException;
 import java.util.Collections;
 import java.util.List;
 
@@ -124,6 +125,8 @@ public Settings onNodeStopped(String nodeName) throws IOException {
                     .get();
                 assertFalse(nodeName, clusterHealthResponse.isTimedOut());
                 return Settings.EMPTY;
+            } catch (final URISyntaxException ex) {
+                throw new IOException(ex);
             } finally {
                 restClient.setNodes(allNodes);
             }
diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java
index e8417f9ceaf2c..1478f48f16650 100644
--- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java
+++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java
@@ -40,8 +40,6 @@
 
 import fixture.azure.AzureHttpHandler;
 import reactor.core.scheduler.Schedulers;
-import org.apache.http.HttpStatus;
-
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.common.Strings;
 import org.opensearch.common.SuppressForbidden;
@@ -63,7 +61,7 @@
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
-
+import org.apache.hc.core5.http.HttpStatus;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
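HttpHost.create(String) now throws the checked URISyntaxException in HttpCore 5, which is why Zen2RestApiIT wraps it into IOException inside a callback that cannot widen its throws clause. The same rewrapping, extracted as a sketch:

    import java.io.IOException;
    import java.net.URISyntaxException;
    import org.apache.hc.core5.http.HttpHost;

    class HostParsing {
        // Rewrap URISyntaxException when the surrounding contract only allows IOException.
        static HttpHost parse(String publishAddress) throws IOException {
            try {
                return HttpHost.create(publishAddress);
            } catch (URISyntaxException ex) {
                throw new IOException(ex);
            }
        }
    }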
diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java
index 616a1ae9feb4f..6850b204e0112 100644
--- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java
+++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java
@@ -37,8 +37,8 @@
 
 import com.google.cloud.storage.StorageOptions;
 import com.sun.net.httpserver.HttpHandler;
 import fixture.gcs.FakeOAuth2HttpHandler;
-import org.apache.http.HttpStatus;
 
+import org.apache.hc.core5.http.HttpStatus;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.Strings;
 import org.opensearch.common.SuppressForbidden;
diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java
index 5691154882c9f..ebed71d90df9a 100644
--- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java
+++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java
@@ -32,9 +32,9 @@
 
 package org.opensearch.search;
 
-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.entity.NStringEntity;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.io.entity.StringEntity;
 import org.apache.lucene.search.TotalHits;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
@@ -343,7 +343,7 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map set
             builder.endObject();
             requestBody = Strings.toString(builder);
         }
-        return new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
+        return new StringEntity(requestBody, ContentType.APPLICATION_JSON);
     }
 
     private static class HighLevelClient extends RestHighLevelClient {
 * <li>Make sure the document count is correct
 * </ul>
 */
-    public void testRollover() throws IOException {
+    public void testRollover() throws IOException, ParseException {
         if (isRunningAgainstOldCluster()) {
             Request createIndex = new Request("PUT", "/" + index + "-000001");
             createIndex.setJsonEntity("{"
@@ -526,7 +527,7 @@ void assertBasicSearchWorks(int count) throws IOException {
         }
     }
 
-    void assertAllSearchWorks(int count) throws IOException {
+    void assertAllSearchWorks(int count) throws IOException, ParseException {
         logger.info("--> testing _all search");
         Map<String, Object> response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")));
         assertNoFailures(response);
@@ -623,14 +624,14 @@ void assertStoredBinaryFields(int count) throws Exception {
         }
     }
 
-    static String toStr(Response response) throws IOException {
+    static String toStr(Response response) throws IOException, ParseException {
         return EntityUtils.toString(response.getEntity());
     }
 
     /**
      * Tests that a single document survives. Super basic smoke test.
      */
-    public void testSingleDoc() throws IOException {
+    public void testSingleDoc() throws IOException, ParseException {
         String docLocation = "/" + index + "/" + type + "/1";
         String doc = "{\"test\": \"test\"}";
 
@@ -792,7 +793,7 @@ public void testRecovery() throws Exception {
      * old and new versions. All of the snapshots include an index, a template,
      * and some routing configuration.
      */
-    public void testSnapshotRestore() throws IOException {
+    public void testSnapshotRestore() throws IOException, ParseException {
         int count;
         if (isRunningAgainstOldCluster()) {
             // Create the index
@@ -1060,7 +1061,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab
         }
     }
 
-    private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException {
+    private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException, ParseException {
         // Check the snapshot metadata, especially the version
         Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName);
         Map<String, Object> listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest));
@@ -1179,7 +1180,7 @@ private void indexDocument(String id) throws IOException {
         assertOK(client().performRequest(indexRequest));
     }
 
-    private int countOfIndexedRandomDocuments() throws IOException {
+    private int countOfIndexedRandomDocuments() throws IOException, NumberFormatException, ParseException {
         return Integer.parseInt(loadInfoDocument(index + "_count"));
    }
 
@@ -1194,7 +1195,7 @@ private void saveInfoDocument(String id, String value) throws IOException {
         client().performRequest(request);
     }
 
-    private String loadInfoDocument(String id) throws IOException {
+    private String loadInfoDocument(String id) throws IOException, ParseException {
         Request request = new Request("GET", "/info/_doc/" + id);
         request.addParameter("filter_path", "_source");
         String doc = toStr(client().performRequest(request));
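FullClusterRestartIT opts to widen the throws clauses all the way up the call chain. The alternative, sketched below with a hypothetical helper name, is to absorb ParseException at a single choke point such as toStr() so that the many call-site signatures stay unchanged; which to prefer is mostly a matter of how much test plumbing you want to touch:

    import java.io.IOException;
    import org.apache.hc.core5.http.ParseException;
    import org.apache.hc.core5.http.io.entity.EntityUtils;
    import org.opensearch.client.Response;

    class ResponseBodies {
        // Swallow the checked ParseException into IOException at one place.
        static String bodyOf(Response response) throws IOException {
            try {
                return EntityUtils.toString(response.getEntity());
            } catch (ParseException ex) {
                throw new IOException("invalid response entity", ex);
            }
        }
    }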
diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java
index 856dc45d42203..44ed426e13782 100644
--- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java
@@ -32,7 +32,8 @@
 
 package org.opensearch.upgrades;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.client.Request;
 import org.opensearch.client.Response;
@@ -235,7 +236,7 @@ public void testQueryBuilderBWC() throws Exception {
         }
     }
 
-    private static Map<String, Object> toMap(Response response) throws IOException {
+    private static Map<String, Object> toMap(Response response) throws IOException, ParseException {
         return toMap(EntityUtils.toString(response.getEntity()));
     }
 
diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java
index f85a94cc9f556..35f530f22a141 100644
--- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java
+++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java
@@ -8,7 +8,9 @@
 
 package org.opensearch.backwards;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.Version;
 import org.opensearch.client.Node;
 import org.opensearch.client.Request;
@@ -21,8 +23,6 @@
 import java.util.Collections;
 import java.util.Map;
 
-import static org.apache.http.HttpStatus.SC_NOT_FOUND;
-
 public class ExceptionIT extends OpenSearchRestTestCase {
     public void testOpensearchException() throws Exception {
         logClusterNodes();
@@ -38,13 +38,13 @@ public void testOpensearchException() throws Exception {
             } catch (ResponseException e) {
                 logger.debug(e.getMessage());
                 Response response = e.getResponse();
-                assertEquals(SC_NOT_FOUND, response.getStatusLine().getStatusCode());
+                assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatusLine().getStatusCode());
                 assertEquals("no_such_index", ObjectPath.createFromResponse(response).evaluate("error.index"));
             }
         }
     }
 
-    private void logClusterNodes() throws IOException {
+    private void logClusterNodes() throws IOException, ParseException {
         ObjectPath objectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "_nodes")));
         Map<String, Object> nodes = objectPath.evaluate("nodes");
         // As of 2.0, 'GET _cat/master' API is deprecated to promote inclusive language.
diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java
index 69c4f0110a3ff..4746ad35a9406 100644
--- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java
+++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java
@@ -31,7 +31,7 @@
 
 package org.opensearch.backwards;
 
-import org.apache.http.HttpHost;
+import org.apache.hc.core5.http.HttpHost;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.client.Request;
@@ -50,6 +50,7 @@
 import org.opensearch.test.rest.yaml.ObjectPath;
 
 import java.io.IOException;
+import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -416,7 +417,7 @@ private List buildShards(String index, Nodes nodes, RestClient client) th
         return shards;
     }
 
-    private Nodes buildNodeAndVersions() throws IOException {
+    private Nodes buildNodeAndVersions() throws IOException, URISyntaxException {
         Response response = client().performRequest(new Request("GET", "_nodes"));
         ObjectPath objectPath = ObjectPath.createFromResponse(response);
         Map<String, Object> nodesAsMap = objectPath.evaluate("nodes");
@@ -426,7 +427,7 @@ private Nodes buildNodeAndVersions() throws IOException {
             id,
             objectPath.evaluate("nodes." + id + ".name"),
             Version.fromString(objectPath.evaluate("nodes." + id + ".version")),
-            HttpHost.create(objectPath.evaluate("nodes." + id + ".http.publish_address"))));
+            HttpHost.create((String)objectPath.evaluate("nodes." + id + ".http.publish_address"))));
         }
         response = client().performRequest(new Request("GET", "_cluster/state"));
         nodes.setClusterManagerNodeId(ObjectPath.createFromResponse(response).evaluate("master_node"));
diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
index 61ccbab95850d..5f73144501f94 100644
--- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
+++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
@@ -31,7 +31,7 @@
 
 package org.opensearch.cluster.remote.test;
 
-import org.apache.http.HttpHost;
+import org.apache.hc.core5.http.HttpHost;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
@@ -104,8 +104,8 @@ private HighLevelClient(RestClient restClient) {
 
     private RestHighLevelClient buildClient(final String url) throws IOException {
         int portSeparator = url.lastIndexOf(':');
-        HttpHost httpHost = new HttpHost(url.substring(0, portSeparator),
-            Integer.parseInt(url.substring(portSeparator + 1)), getProtocol());
+        HttpHost httpHost = new HttpHost(getProtocol(), url.substring(0, portSeparator),
+            Integer.parseInt(url.substring(portSeparator + 1)));
         return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[]{httpHost}));
     }
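Worth calling out once: the HttpHost constructor's argument order flips in HttpCore 5, with the scheme moving from last position to first, as in the AbstractMultiClusterRemoteTestCase hunk above. Side by side:

    import org.apache.hc.core5.http.HttpHost;

    class HostConstruction {
        static HttpHost example() {
            // HttpClient 4: new HttpHost("localhost", 9200, "https")
            // HttpClient 5: the scheme comes first.
            return new HttpHost("https", "localhost", 9200);
        }
    }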
diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
index 888fa886c3c5e..ed4bf11041c88 100644
--- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
@@ -31,7 +31,8 @@
 
 package org.opensearch.upgrades;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.client.Request;
@@ -61,7 +62,7 @@
  */
 public class IndexingIT extends AbstractRollingTestCase {
 
-    public void testIndexing() throws IOException {
+    public void testIndexing() throws IOException, ParseException {
         switch (CLUSTER_TYPE) {
             case OLD:
                 break;
@@ -203,7 +204,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio
         client().performRequest(bulk);
     }
 
-    private void assertCount(String index, int count) throws IOException {
+    private void assertCount(String index, int count) throws IOException, ParseException {
         Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
         searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");
         searchTestIndexRequest.addParameter("filter_path", "hits.total");
diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java
index dbb18aec19fca..4fd82af9603a9 100644
--- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.upgrades;
 
-import org.apache.http.util.EntityUtils;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.action.support.PlainActionFuture;
@@ -50,6 +49,7 @@
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.rest.RestStatus;
 import org.opensearch.test.rest.yaml.ObjectPath;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.hamcrest.Matcher;
 import org.hamcrest.Matchers;
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java
index 6178167c98e98..0c845bb2d34e5 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java
@@ -34,7 +34,8 @@
 
 import java.io.IOException;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.client.Request;
 import org.opensearch.client.Response;
 import org.opensearch.client.ResponseException;
@@ -60,7 +61,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
             .build();
     }
 
-    public void testThatErrorTraceParamReturns400() throws IOException {
+    public void testThatErrorTraceParamReturns400() throws IOException, ParseException {
         Request request = new Request("DELETE", "/");
         request.addParameter("error_trace", "true");
         ResponseException e = expectThrows(ResponseException.class, () ->
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java
index 090a572ef0d6a..e2ccf86d31dbf 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java
@@ -32,7 +32,8 @@
 
 package org.opensearch.http;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.client.Request;
 import org.opensearch.client.Response;
 import org.opensearch.client.ResponseException;
@@ -47,7 +48,7 @@
  */
 public class DetailedErrorsEnabledIT extends HttpSmokeTestCase {
 
-    public void testThatErrorTraceWorksByDefault() throws IOException {
+    public void testThatErrorTraceWorksByDefault() throws IOException, ParseException {
         try {
             Request request = new Request("DELETE", "/");
             request.addParameter("error_trace", "true");
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java
index 1925ecc5cd346..5514fae996a39 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java
@@ -31,9 +31,10 @@
 
 package org.opensearch.http;
 
-import org.apache.http.HttpHeaders;
-import org.apache.http.client.entity.GzipDecompressingEntity;
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.client5.http.entity.GzipDecompressingEntity;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.client.Request;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.Response;
@@ -56,7 +57,7 @@ public class HttpCompressionIT extends OpenSearchRestTestCase {
         "    }\n" +
         "}";
 
-    public void testCompressesResponseIfRequested() throws IOException {
+    public void testCompressesResponseIfRequested() throws IOException, ParseException {
         Request request = new Request("POST", "/company/_doc/2");
         request.setJsonEntity(SAMPLE_DOCUMENT);
         Response response = client().performRequest(request);
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java
index c3d766abe96ca..8e6dea7edd0f8 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java
@@ -32,7 +32,8 @@
 
 package org.opensearch.http;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.client.Request;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.Response;
@@ -46,7 +47,7 @@
 
 public class NoHandlerIT extends HttpSmokeTestCase {
 
-    public void testNoHandlerRespectsAcceptHeader() throws IOException {
+    public void testNoHandlerRespectsAcceptHeader() throws IOException, ParseException {
         runTestNoHandlerRespectsAcceptHeader(
             "application/json",
             "application/json; charset=UTF-8",
@@ -58,7 +59,7 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException {
     }
 
     private void runTestNoHandlerRespectsAcceptHeader(
-        final String accept, final String contentType, final String expect) throws IOException {
+        final String accept, final String contentType, final String expect) throws IOException, ParseException {
         Request request = new Request("GET", "/foo/bar/baz/qux/quux");
         RequestOptions.Builder options = request.getOptions().toBuilder();
         options.addHeader("Accept", accept);
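GzipDecompressingEntity moves from org.apache.http.client.entity to org.apache.hc.client5.http.entity, but it is still applied by wrapping the response entity by hand, as HttpCompressionIT does. A sketch:

    import java.io.IOException;
    import org.apache.hc.client5.http.entity.GzipDecompressingEntity;
    import org.apache.hc.core5.http.HttpEntity;
    import org.apache.hc.core5.http.ParseException;
    import org.apache.hc.core5.http.io.entity.EntityUtils;

    class GzipBodies {
        // Unwrap a gzip-compressed entity and read it as a String.
        static String decompress(HttpEntity gzipped) throws IOException, ParseException {
            return EntityUtils.toString(new GzipDecompressingEntity(gzipped));
        }
    }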
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java
index b8257272ba65b..74b85ace37b81 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java
@@ -30,7 +30,7 @@
 
 package org.opensearch.http;
 
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.client.Request;
 import org.opensearch.client.Response;
 import org.opensearch.client.ResponseException;
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java
index a13d406f7b133..42c7357de3f07 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java
@@ -31,8 +31,8 @@
 
 package org.opensearch.http;
 
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.entity.NByteArrayEntity;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
 import org.apache.logging.log4j.LogManager;
 import org.apache.lucene.util.SetOnce;
 import org.opensearch.action.admin.cluster.node.info.NodeInfo;
@@ -109,7 +109,7 @@ public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Except
             new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))));
         Request restRequest = new Request("POST", "/_msearch");
         byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent());
-        restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType)));
+        restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType)));
         verifyCancellationDuringQueryPhase(MultiSearchAction.NAME, restRequest);
     }
 
@@ -158,7 +158,7 @@ public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Except
             new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))));
         Request restRequest = new Request("POST", "/_msearch");
         byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent());
-        restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType)));
+        restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType)));
         verifyCancellationDuringFetchPhase(MultiSearchAction.NAME, restRequest);
     }
diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java
index f85c3efcbb6e8..2b1abe45f7723 100644
--- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java
+++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java
@@ -32,20 +32,22 @@
 
 package org.opensearch.wildfly.transport;
 
-import org.apache.http.HttpHost;
+import org.apache.hc.core5.http.HttpHost;
 import org.opensearch.client.RestClient;
 import org.opensearch.client.RestHighLevelClient;
 import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.io.PathUtils;
 
 import javax.enterprise.inject.Produces;
+
+import java.net.URISyntaxException;
 import java.nio.file.Path;
 
 @SuppressWarnings("unused")
 public final class RestHighLevelClientProducer {
 
     @Produces
-    public RestHighLevelClient createRestHighLevelClient() {
+    public RestHighLevelClient createRestHighLevelClient() throws URISyntaxException {
         String httpUri = System.getProperty("opensearch.uri");
 
         return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri)));
diff --git a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java
index 7961ca69c2d29..2f2b355baedaf 100644
--- a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java
+++ b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java
@@ -32,14 +32,15 @@
 
 package org.opensearch.wildfly;
 
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
+import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
+import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
+import org.apache.hc.client5.http.impl.classic.HttpClientBuilder;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
+import org.apache.hc.core5.http.io.entity.StringEntity;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.tests.util.LuceneTestCase;
@@ -78,7 +79,7 @@ private String buildBaseUrl() {
         return "http://localhost:" + port + "/example-app/transport";
     }
 
-    public void testRestClient() throws URISyntaxException, IOException {
+    public void testRestClient() throws URISyntaxException, IOException, ParseException {
         final String baseUrl = buildBaseUrl();
 
         try (CloseableHttpClient client = HttpClientBuilder.create().build()) {
@@ -100,7 +101,7 @@ public void testRestClient() throws URISyntaxException, IOException {
             put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON));
 
             try (CloseableHttpResponse response = client.execute(put)) {
-                int status = response.getStatusLine().getStatusCode();
+                int status = response.getCode();
                 assertThat(
                     "expected a 201 response but got: " + status + " - body: " + EntityUtils.toString(response.getEntity()),
                     status,
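On the classic (blocking) client the StatusLine indirection is also gone: WildflyIT now reads response.getCode() directly. In isolation (the URL is illustrative):

    import org.apache.hc.client5.http.classic.methods.HttpGet;
    import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
    import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
    import org.apache.hc.client5.http.impl.classic.HttpClientBuilder;

    class StatusCheck {
        // getStatusLine().getStatusCode() becomes getCode() in HttpClient 5.
        static int statusOf(String url) throws Exception {
            try (CloseableHttpClient client = HttpClientBuilder.create().build();
                 CloseableHttpResponse response = client.execute(new HttpGet(url))) {
                return response.getCode();
            }
        }
    }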
"suppressAccessChecks"; diff --git a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java index 69710b8e5c848..eb5177bc0f39b 100644 --- a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java +++ b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java @@ -36,7 +36,8 @@ import java.util.Map; import joptsimple.internal.Strings; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; import org.opensearch.test.OpenSearchTestCase; /** diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index d7b057e5479eb..adaf95ae67a8e 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -34,8 +34,6 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; -import org.apache.http.ConnectionClosedException; -import org.apache.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobContainer; @@ -46,6 +44,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.Before; diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index f082c7a45a207..28dbcf478eb86 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpStatus; + +import org.apache.hc.core5.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 1ab7785b17f5e..b20154fff9256 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -36,7 +36,8 @@ import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -2344,7 +2345,7 @@ protected static 
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 1ab7785b17f5e..b20154fff9256 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -36,7 +36,8 @@
 
 import com.carrotsearch.randomizedtesting.annotations.TestGroup;
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import org.apache.http.HttpHost;
+
+import org.apache.hc.core5.http.HttpHost;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.tests.util.LuceneTestCase;
@@ -2344,7 +2345,7 @@ protected static RestClient createRestClient(
             if (node.getInfo(HttpInfo.class) != null) {
                 TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress();
                 InetSocketAddress address = publishAddress.address();
-                hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol));
+                hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort()));
             }
         }
         RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[hosts.size()]));
*/ public static List entityAsList(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -344,7 +348,7 @@ public boolean warningsShouldFailRequest(List warnings) { * Construct an HttpHost from the given host and port */ protected HttpHost buildHttpHost(String host, int port) { - return new HttpHost(host, port, getProtocol()); + return new HttpHost(getProtocol(), host, port); } /** @@ -845,9 +849,16 @@ protected static void configureClient(RestClientBuilder builder, Settings settin try (InputStream is = Files.newInputStream(path)) { keyStore.load(is, keystorePass.toCharArray()); } - SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); - SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(sslcontext); - builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy)); + final SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); + builder.setHttpClientConfigCallback(httpClientBuilder -> { + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(sslcontext).build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); + }); } catch (KeyStoreException | NoSuchAlgorithmException | KeyManagementException | CertificateException e) { throw new RuntimeException("Error setting up ssl", e); } @@ -864,7 +875,9 @@ protected static void configureClient(RestClientBuilder builder, Settings settin socketTimeoutString == null ? 
"60s" : socketTimeoutString, CLIENT_SOCKET_TIMEOUT ); - builder.setRequestConfigCallback(conf -> conf.setSocketTimeout(Math.toIntExact(socketTimeout.getMillis()))); + builder.setRequestConfigCallback( + conf -> conf.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(socketTimeout.getMillis()))) + ); if (settings.hasValue(CLIENT_PATH_PREFIX)) { builder.setPathPrefix(settings.get(CLIENT_PATH_PREFIX)); } @@ -1082,7 +1095,7 @@ protected static Map getAsMap(final String endpoint) throws IOEx } protected static Map responseAsMap(Response response) throws IOException { - XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType()); Map responseEntity = XContentHelper.convertToMap( entityContentType.xContent(), response.getEntity().getContent(), diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java index cd5f1fe168b12..da71d0e078dc0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -32,8 +32,8 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.client.Request; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java index 56ccb91dc3331..13ede9d44f1ad 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java @@ -32,11 +32,13 @@ package org.opensearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; @@ -188,16 +190,20 @@ public ClientYamlTestResponse callApi( if (false == restApi.isBodySupported()) { throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api"); } - String contentType = entity.getContentType().getValue(); + String contentType = entity.getContentType(); // randomly test the GET with source param instead of GET/POST with body - if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { - logger.debug("sending the request body as source param with GET method"); - queryStringParams.put("source", EntityUtils.toString(entity)); - queryStringParams.put("source_content_type", contentType); - requestMethod = HttpGet.METHOD_NAME; - entity = null; - } else { - 
requestMethod = RandomizedTest.randomFrom(supportedMethods); + try { + if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { + logger.debug("sending the request body as source param with GET method"); + queryStringParams.put("source", EntityUtils.toString(entity)); + queryStringParams.put("source_content_type", contentType); + requestMethod = HttpGet.METHOD_NAME; + entity = null; + } else { + requestMethod = RandomizedTest.randomFrom(supportedMethods); + } + } catch (final ParseException ex) { + throw new IOException(ex); } } else { if (restApi.isBodyRequired()) { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 78818aefe44cc..780c43b6ccc11 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -33,9 +33,9 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java index 8fc0554e2b31e..1e441e01c5a69 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java @@ -31,9 +31,9 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.Header; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java index 473511825ef60..aa70f7883c4b8 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java @@ -31,7 +31,7 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index f228c87186afd..b5449480e38ff 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -34,7 +34,8 @@ 
import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.Version; import org.opensearch.client.Node; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 36186ea330021..1947982f19247 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -32,7 +32,7 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.test.OpenSearchTestCase; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java index 1fb08934c8b8b..eceb78a832710 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java @@ -32,7 +32,6 @@ package org.opensearch.test.rest.yaml.section; -import org.apache.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.NodeSelector; @@ -43,6 +42,7 @@ import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.opensearch.test.rest.yaml.ClientYamlTestResponse; +import org.apache.hc.core5.http.HttpHost; import org.hamcrest.MatcherAssert; import java.io.IOException; From 7c03f89d990da85d8c6ec95a88f865649a67a8a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Oct 2022 16:39:25 -0700 Subject: [PATCH 15/39] Bump woodstox-core from 6.3.0 to 6.3.1 in /plugins/repository-hdfs (#4737) * Bump woodstox-core from 6.3.0 to 6.3.1 in /plugins/repository-hdfs Bumps [woodstox-core](https://github.com/FasterXML/woodstox) from 6.3.0 to 6.3.1. - [Release notes](https://github.com/FasterXML/woodstox/releases) - [Commits](https://github.com/FasterXML/woodstox/compare/woodstox-core-6.3.0...woodstox-core-6.3.1) --- updated-dependencies: - dependency-name: com.fasterxml.woodstox:woodstox-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index e321efc39a2bf..8b29a5d38e7c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `protobuf-java` from 3.21.2 to 3.21.7 - Bumps `azure-core` from 1.31.0 to 1.33.0 - Bumps `avro` from 1.11.0 to 1.11.1 +- Bumps `woodstox-core` from 6.3.0 to 6.3.1 ### Added diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3cdac4c2d2560..792bdc6bacd4a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -85,7 +85,7 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.8.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.3.0' + implementation 'com.fasterxml.woodstox:woodstox-core:6.3.1' implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 deleted file mode 100644 index ebd85df98b39e..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03c1df4164b107ee22ad4f24bd453ec78a0efd95 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 new file mode 100644 index 0000000000000..fb4e67404cd42 --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 @@ -0,0 +1 @@ +bf29b07ca4dd81ef3c0bc18c8bd5617510a81c5d \ No newline at end of file From 10f8ed8282579ed08d6dba89fce4fd589b75802d Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 11 Oct 2022 23:13:42 -0700 Subject: [PATCH 16/39] Format changelog as per plugin requirements (#4740) --- CHANGELOG.md | 36 ++++++------------------------------ 1 file changed, 6 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b29a5d38e7c6..7ef9454e6ddd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,15 +3,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] -### Dependencies -- Bumps `gson` from 2.9.0 to 2.9.1 -- Bumps `protobuf-java` from 3.21.2 to 3.21.7 -- Bumps `azure-core` from 1.31.0 to 1.33.0 -- Bumps `avro` from 1.11.0 to 1.11.1 -- Bumps `woodstox-core` from 6.3.0 to 6.3.1 - ### Added - - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API 
([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) @@ -37,16 +29,17 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 - Bumps `jettison` from 1.5.0 to 1.5.1 - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 - Bumps `forbiddenapis` from 3.3 to 3.4 - - -### Dependencies +- Bumps `gson` from 2.9.0 to 2.9.1 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `azure-core` from 1.31.0 to 1.33.0 +- Bumps `avro` from 1.11.0 to 1.11.1 +- Bumps `woodstox-core` from 6.3.0 to 6.3.1 - Bumps `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) - Bumps `azure-core-http-netty` from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) - Bumps `azure-core` from 1.27.0 to 1.31.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) @@ -59,9 +52,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - ### Changed - - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) @@ -85,9 +76,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - ### Deprecated - ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) @@ -95,9 +84,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants 
([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) - ### Fixed - - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) @@ -138,15 +125,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - ### Security - - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) ## [2.x] - ### Added - - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) @@ -154,19 +137,12 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of Doc values on the GeoShape Field - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. - ### Changed - ### Deprecated - ### Removed - ### Fixed - - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) - ### Security - [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x From c92846d0ae121db54ce740790df75b36a24f804e Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 12 Oct 2022 13:32:27 -0500 Subject: [PATCH 17/39] Better plural stemmer than minimal_english (#4738) Drops the trailing "e" in taxes, dresses, watches, dishes etc that otherwise cause mismatches with plural and singular forms. 
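For illustration, a minimal sketch of how the new stemmer is wired into an index
analyzer, mirroring the unit test added in this patch (the "my_plurals" filter and
analyzer names are placeholders from that test, not part of the change itself):

    // Only the "plural_english" language value is new in this change; the
    // filter/analyzer names and tokenizer choice are illustrative.
    Settings settings = Settings.builder()
        .put("index.analysis.filter.my_plurals.type", "stemmer")
        .put("index.analysis.filter.my_plurals.language", "plural_english")
        .put("index.analysis.analyzer.my_plurals.tokenizer", "whitespace")
        .put("index.analysis.analyzer.my_plurals.filter", "my_plurals")
        .build();
    // With this analyzer, "watches" -> "watch" and "dishes" -> "dish", so
    // plural and singular forms produce the same index terms.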
Signed-off-by: Nicholas Walter Knize Co-authored-by: Mark Harwood Co-authored-by: Nicholas Walter Knize --- CHANGELOG.md | 2 + .../common/EnglishPluralStemFilter.java | 182 ++++++++++++++++++ .../common/StemmerTokenFilterFactory.java | 2 + .../StemmerTokenFilterFactoryTests.java | 77 ++++++++ 4 files changed, 263 insertions(+) create mode 100644 modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ef9454e6ddd4..3cdedbab9cef0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -143,6 +143,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) + ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD [2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java new file mode 100644 index 0000000000000..c30318a31527b --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.en.EnglishMinimalStemFilter; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; + +import java.io.IOException; + +public final class EnglishPluralStemFilter extends TokenFilter { + private final EnglishPluralStemmer stemmer = new EnglishPluralStemmer(); + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class); + + public EnglishPluralStemFilter(TokenStream input) { + super(input); + } + + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + if (!keywordAttr.isKeyword()) { + final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length()); + termAtt.setLength(newlen); + } + return true; + } else { + return false; + } + } + + /** + * Plural stemmer for English based on the {@link EnglishMinimalStemFilter} + *

+ * <p>
+ * This stemmer removes plurals but beyond EnglishMinimalStemFilter adds
+ * four new suffix rules to remove dangling e characters:
+ * <ul>
+ * <li>xes - "boxes" becomes "box"</li>
+ * <li>sses - "dresses" becomes "dress"</li>
+ * <li>shes - "dishes" becomes "dish"</li>
+ * <li>tches - "watches" becomes "watch"</li>
+ * </ul>
+ * See https://github.com/elastic/elasticsearch/issues/42892
+ * <p>
+ * In addition the s stemmer logic is amended so that:
+ * <ul>
+ * <li>ees->ee so that "bees" matches "bee"</li>
+ * <li>ies->y only on longer words so that "ties" matches "tie"</li>
+ * <li>oes->o so that "tomatoes" matches "tomato" but the e is retained for some words, e.g. "shoes" to "shoe"</li>
+ * </ul>
    + */ + public static class EnglishPluralStemmer { + + // Words ending in oes that retain the e when stemmed + public static final char[][] oesExceptions = { "shoes".toCharArray(), "canoes".toCharArray(), "oboes".toCharArray() }; + // Words ending in ches that retain the e when stemmed + public static final char[][] chesExceptions = { + "cliches".toCharArray(), + "avalanches".toCharArray(), + "mustaches".toCharArray(), + "moustaches".toCharArray(), + "quiches".toCharArray(), + "headaches".toCharArray(), + "heartaches".toCharArray(), + "porsches".toCharArray(), + "tranches".toCharArray(), + "caches".toCharArray() }; + + @SuppressWarnings("fallthrough") + public int stem(char s[], int len) { + if (len < 3 || s[len - 1] != 's') return len; + + switch (s[len - 2]) { + case 'u': + case 's': + return len; + case 'e': + // Modified ies->y logic from original s-stemmer - only work on strings > 4 + // so spies -> spy still but pies->pie. + // The original code also special-cased aies and eies for no good reason as far as I can tell. + // ( no words of consequence - eg http://www.thefreedictionary.com/words-that-end-in-aies ) + if (len > 4 && s[len - 3] == 'i') { + s[len - 3] = 'y'; + return len - 2; + } + + // Suffix rules to remove any dangling "e" + if (len > 3) { + // xes (but >1 prefix so we can stem "boxes->box" but keep "axes->axe") + if (len > 4 && s[len - 3] == 'x') { + return len - 2; + } + // oes + if (len > 3 && s[len - 3] == 'o') { + if (isException(s, len, oesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + } + if (len > 4) { + // shes/sses + if (s[len - 4] == 's' && (s[len - 3] == 'h' || s[len - 3] == 's')) { + return len - 2; + } + + // ches + if (len > 4) { + if (s[len - 4] == 'c' && s[len - 3] == 'h') { + if (isException(s, len, chesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + + } + } + } + } + + default: + return len - 1; + } + } + + private boolean isException(char[] s, int len, char[][] exceptionsList) { + for (char[] oesRule : exceptionsList) { + int rulePos = oesRule.length - 1; + int sPos = len - 1; + boolean matched = true; + while (rulePos >= 0 && sPos >= 0) { + if (oesRule[rulePos] != s[sPos]) { + matched = false; + break; + } + rulePos--; + sPos--; + } + if (matched) { + return true; + } + } + return false; + } + } + +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java index 5d96f01265cf6..fc045447e159e 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java @@ -154,6 +154,8 @@ public TokenStream create(TokenStream tokenStream) { return new SnowballFilter(tokenStream, new EnglishStemmer()); } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) { return new EnglishMinimalStemFilter(tokenStream); + } else if ("plural_english".equalsIgnoreCase(language) || "pluralEnglish".equalsIgnoreCase(language)) { + return new EnglishPluralStemFilter(tokenStream); } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) { return new EnglishPossessiveFilter(tokenStream); diff --git 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java
index 2cd7b74cd8c35..18d3727475065 100644
--- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java
@@ -111,6 +111,83 @@ public void testPorter2FilterFactory() throws IOException {
         }
     }

+    public void testEnglishPluralFilter() throws IOException {
+        int iters = scaledRandomIntBetween(20, 100);
+        for (int i = 0; i < iters; i++) {
+
+            Version v = VersionUtils.randomVersion(random());
+            Settings settings = Settings.builder()
+                .put("index.analysis.filter.my_plurals.type", "stemmer")
+                .put("index.analysis.filter.my_plurals.language", "plural_english")
+                .put("index.analysis.analyzer.my_plurals.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.my_plurals.filter", "my_plurals")
+                .put(SETTING_VERSION_CREATED, v)
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .build();
+
+            OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN);
+            TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_plurals");
+            assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
+            Tokenizer tokenizer = new WhitespaceTokenizer();
+            tokenizer.setReader(new StringReader("dresses"));
+            TokenStream create = tokenFilter.create(tokenizer);
+            IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
+            NamedAnalyzer analyzer = indexAnalyzers.get("my_plurals");
+            assertThat(create, instanceOf(EnglishPluralStemFilter.class));
+
+            // Check old EnglishMinimalStemmer ("S" stemmer) logic
+            assertAnalyzesTo(analyzer, "phones", new String[] { "phone" });
+            assertAnalyzesTo(analyzer, "horses", new String[] { "horse" });
+            assertAnalyzesTo(analyzer, "cameras", new String[] { "camera" });
+
+            // The original s stemmer gives up on stemming oes words because English has no fixed rule for the stem
+            // (see https://howtospell.co.uk/making-O-words-plural )
+            // This stemmer removes the es but retains e for a small number of exceptions
+            assertAnalyzesTo(analyzer, "mosquitoes", new String[] { "mosquito" });
+            assertAnalyzesTo(analyzer, "heroes", new String[] { "hero" });
+            // oes exceptions that retain the e.
+            assertAnalyzesTo(analyzer, "shoes", new String[] { "shoe" });
+            assertAnalyzesTo(analyzer, "horseshoes", new String[] { "horseshoe" });
+            assertAnalyzesTo(analyzer, "canoes", new String[] { "canoe" });
+            assertAnalyzesTo(analyzer, "oboes", new String[] { "oboe" });
+
+            // Check improved EnglishPluralStemFilter logic
+            // sses
+            assertAnalyzesTo(analyzer, "dresses", new String[] { "dress" });
+            assertAnalyzesTo(analyzer, "possess", new String[] { "possess" });
+            assertAnalyzesTo(analyzer, "possesses", new String[] { "possess" });
+            // xes
+            assertAnalyzesTo(analyzer, "boxes", new String[] { "box" });
+            assertAnalyzesTo(analyzer, "axes", new String[] { "axe" });
+            // shes
+            assertAnalyzesTo(analyzer, "dishes", new String[] { "dish" });
+            assertAnalyzesTo(analyzer, "washes", new String[] { "wash" });
+            // ees
+            assertAnalyzesTo(analyzer, "employees", new String[] { "employee" });
+            assertAnalyzesTo(analyzer, "bees", new String[] { "bee" });
+            // tch
+            assertAnalyzesTo(analyzer, "watches", new String[] { "watch" });
+            assertAnalyzesTo(analyzer, "itches", new String[] { "itch" });
+            // ies->y but only for length >4
+            assertAnalyzesTo(analyzer, "spies", new String[] { "spy" });
+            assertAnalyzesTo(analyzer, "ties", new String[] { "tie" });
+            assertAnalyzesTo(analyzer, "lies", new String[] { "lie" });
+            assertAnalyzesTo(analyzer, "pies", new String[] { "pie" });
+            assertAnalyzesTo(analyzer, "dies", new String[] { "die" });
+
+            assertAnalyzesTo(analyzer, "lunches", new String[] { "lunch" });
+            assertAnalyzesTo(analyzer, "avalanches", new String[] { "avalanche" });
+            assertAnalyzesTo(analyzer, "headaches", new String[] { "headache" });
+            assertAnalyzesTo(analyzer, "caches", new String[] { "cache" });
+            assertAnalyzesTo(analyzer, "beaches", new String[] { "beach" });
+            assertAnalyzesTo(analyzer, "britches", new String[] { "britch" });
+            assertAnalyzesTo(analyzer, "cockroaches", new String[] { "cockroach" });
+            assertAnalyzesTo(analyzer, "cliches", new String[] { "cliche" });
+            assertAnalyzesTo(analyzer, "quiches", new String[] { "quiche" });
+
+        }
+    }
+
     public void testMultipleLanguagesThrowsException() throws IOException {
         Version v = VersionUtils.randomVersion(random());
         Settings settings = Settings.builder()

From beb99150a84d54bb26182d50e363beea79d1a09a Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Wed, 12 Oct 2022 14:49:01 -0500
Subject: [PATCH 18/39] Install and configure Log4j JUL adapter for Lucene 9.4 (#4754)

Adds the log4j-jul dependency (along with license and notice files) and
configures the java.util.logging Log4j adapter to route all JUL logs to the
proper OpenSearch locations set up through the LogConfigurator, for Lucene 9.4
compatibility.
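As a rough sketch of the effect (assuming, as JUL requires, that the property
is set before java.util.logging.LogManager is first used, which is what the
LogConfigurator change below arranges; the logger name is illustrative):

    // Bridge JUL to Log4j; this is the exact property the patch sets.
    System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
    // A JUL logger obtained afterwards, e.g. by Lucene 9.4 internals,
    // now emits through the Log4j appenders configured by LogConfigurator.
    java.util.logging.Logger jul = java.util.logging.Logger.getLogger("org.apache.lucene");
    jul.info("routed to the OpenSearch log files");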
Signed-off-by: Nicholas Knize --- CHANGELOG.md | 1 + client/rest/build.gradle | 1 + qa/os/build.gradle | 1 + server/build.gradle | 1 + server/licenses/log4j-jul-2.17.1.jar.sha1 | 1 + server/licenses/log4j-jul-LICENSE.txt | 202 ++++++++++++++++++ server/licenses/log4j-jul-NOTICE.txt | 20 ++ .../common/logging/LogConfigurator.java | 1 + 8 files changed, 228 insertions(+) create mode 100644 server/licenses/log4j-jul-2.17.1.jar.sha1 create mode 100644 server/licenses/log4j-jul-LICENSE.txt create mode 100644 server/licenses/log4j-jul-NOTICE.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cdedbab9cef0..28a542206eb5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -137,6 +137,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of Doc values on the GeoShape Field - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. +- Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) ### Changed ### Deprecated ### Removed diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 178359948f32e..eacef14d17ce2 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -56,6 +56,7 @@ dependencies { testImplementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" testImplementation "org.apache.logging.log4j:log4j-api:${versions.log4j}" testImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-jul:${versions.log4j}" testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" } diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 92c5e4f154ad8..9a1e6f781faec 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -42,6 +42,7 @@ dependencies { api "org.apache.httpcomponents:fluent-hc:${versions.httpclient}" api "org.apache.logging.log4j:log4j-api:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-jcl:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/server/build.gradle b/server/build.gradle index 9d9d12e798eab..d50be48afc023 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -129,6 +129,7 @@ dependencies { // logging api "org.apache.logging.log4j:log4j-api:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional // jna diff --git a/server/licenses/log4j-jul-2.17.1.jar.sha1 b/server/licenses/log4j-jul-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4afb381a696e9 --- /dev/null +++ b/server/licenses/log4j-jul-2.17.1.jar.sha1 @@ -0,0 +1 @@ +881333b463d47828eda7443b19811763367b1916 \ No newline at end of file diff --git a/server/licenses/log4j-jul-LICENSE.txt b/server/licenses/log4j-jul-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/server/licenses/log4j-jul-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/server/licenses/log4j-jul-NOTICE.txt b/server/licenses/log4j-jul-NOTICE.txt new file mode 100644 index 0000000000000..243a0391fb574 --- /dev/null +++ b/server/licenses/log4j-jul-NOTICE.txt @@ -0,0 +1,20 @@ +Apache Log4j +Copyright 1999-2021 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +ResolverUtil.java +Copyright 2005-2006 Tim Fennell + +Dumbster SMTP test server +Copyright 2004 Jason Paul Kitchen + +TypeUtil.java +Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams + +picocli (http://picocli.info) +Copyright 2017 Remko Popma + +TimeoutBlockingWaitStrategy.java and parts of Util.java +Copyright 2011 LMAX Ltd. diff --git a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java index e42727b2aa2af..c0405f9e52b77 100644 --- a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java @@ -259,6 +259,7 @@ private static void configureLoggerLevels(final Settings settings) { */ @SuppressForbidden(reason = "sets system property for logging configuration") private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) { + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); System.setProperty("opensearch.logs.base_path", logsPath.toString()); System.setProperty("opensearch.logs.cluster_name", ClusterName.CLUSTER_NAME_SETTING.get(settings).value()); System.setProperty("opensearch.logs.node_name", Node.NODE_NAME_SETTING.get(settings)); From d15795a7aca488c1fadb04b3c8d9f1a3b02e4056 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Thu, 13 Oct 2022 01:35:36 +0530 Subject: [PATCH 19/39] Fix weighted routing metadata deserialization error during node restart (#4691) * Fix weighted routing metadata deserialization error during node restart Signed-off-by: Anshu Agarwal --- CHANGELOG.md | 1 + .../cluster/routing/WeightedRoutingIT.java | 42 +++++++++++++++++++ .../metadata/WeightedRoutingMetadata.java | 2 - 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 28a542206eb5a..09d7cb8ff79df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) +- Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 6cf8292095c6a..61f82877bf12b 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -242,4 +242,46 @@ public void testGetWeightedRouting_WeightsAreSet() throws IOException { assertEquals(weightedRouting, weightedRoutingResponse.weights()); assertEquals("3.0", weightedRoutingResponse.getLocalNodeWeight()); } + + public void testWeightedRoutingMetadataOnOSProcessRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + ensureStableCluster(3); + + // routing weights are set in cluster metadata + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + ensureGreen(); + + // Restart a random data node and check that OS process comes healthy + internalCluster().restartRandomDataNode(); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java index 27beb21f28f7c..07cdc949c4529 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -90,8 +90,6 @@ public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws Map weights = new HashMap<>(); WeightedRouting weightedRouting = null; XContentParser.Token token; - // move to the first alias - parser.nextToken(); String awarenessField = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { From 431bdeb1bb17090206ed2e26a486b0ddd00f08bb Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 12 Oct 2022 21:03:49 -0700 Subject: [PATCH 20/39] Introduce experimental searchable snapshot API (#4680) This commit adds a new parameter to the snapshot restore API to restore to a new type of "remote snapshot" index where, unlike traditional snapshot restores, the index data is not all downloaded to disk and instead is read on-demand at search time. The feature is functional with this commit, and includes a simple end-to-end integration test, but is far from complete. See tracking issue #2919 for the rest of the work planned/underway. 
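As a usage sketch, the new storage type is requested at restore time, mirroring
the integration test added in this commit (repository and snapshot names are
placeholders; the REST equivalent is the new "storage_type" request parameter):

    // Restore a snapshot as a "remote_snapshot" index: segment data stays in
    // the repository and is fetched on demand at search time.
    client.admin()
        .cluster()
        .prepareRestoreSnapshot("my-repo", "my-snapshot")
        .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT)
        .setWaitForCompletion(true)
        .get();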
All new capabilities are gated behind a new searchable snapshot feature flag. Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 2 + .../snapshots/SearchableSnapshotIT.java | 135 ++++++++++++++ .../restore/RestoreSnapshotRequest.java | 78 +++++++- .../RestoreSnapshotRequestBuilder.java | 8 + .../cluster/routing/RecoverySource.java | 33 +++- .../common/settings/IndexScopedSettings.java | 11 +- .../opensearch/common/util/FeatureFlags.java | 8 + ...ransportNodesListGatewayStartedShards.java | 3 +- .../org/opensearch/index/IndexModule.java | 54 ++++-- .../org/opensearch/index/IndexSettings.java | 24 +++ .../opensearch/index/shard/IndexShard.java | 35 ++-- .../RemoveCorruptedShardDataCommand.java | 3 +- .../index/shard/ShardStateMetadata.java | 47 ++++- .../opensearch/index/shard/StoreRecovery.java | 3 - .../InMemoryRemoteSnapshotDirectory.java | 169 ++++++++++++++++++ .../store/RemoteSnapshotDirectoryFactory.java | 49 +++++ .../opensearch/indices/IndicesService.java | 12 +- .../main/java/org/opensearch/node/Node.java | 20 ++- .../opensearch/snapshots/RestoreService.java | 42 ++++- .../index/shard/IndexShardTests.java | 10 +- .../index/shard/ShardPathTests.java | 15 +- .../index/store/FsDirectoryFactoryTests.java | 4 +- .../test/store/MockFSDirectoryFactory.java | 7 +- 23 files changed, 705 insertions(+), 67 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java create mode 100644 server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 09d7cb8ff79df..4310103803fbe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) +- Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java new file mode 100644 index 0000000000000..96fcf0053c9ab --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -0,0 +1,135 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.snapshots; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; + +import org.hamcrest.MatcherAssert; +import org.junit.BeforeClass; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import 
org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.Index; +import org.opensearch.monitor.fs.FsInfo; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; +import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; + +public final class SearchableSnapshotIT extends AbstractSnapshotIntegTestCase { + + @BeforeClass + public static void assumeFeatureFlag() { + assumeTrue( + "Searchable snapshot feature flag is enabled", + Boolean.parseBoolean(System.getProperty(FeatureFlags.SEARCHABLE_SNAPSHOT)) + ); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCreateSearchableSnapshot() throws Exception { + final Client client = client(); + createRepository("test-repo", "fs"); + createIndex( + "test-idx-1", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + createIndex( + "test-idx-2", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + ensureGreen(); + indexRandomDocs("test-idx-1", 100); + indexRandomDocs("test-idx-2", 100); + + logger.info("--> snapshot"); + final CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx-1", "test-idx-2") + .get(); + MatcherAssert.assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + MatcherAssert.assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + assertTrue(client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get().isAcknowledged()); + + logger.info("--> restore indices as 'remote_snapshot'"); + client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + .setWaitForCompletion(true) + .execute() + .actionGet(); + ensureGreen(); + + assertDocCount("test-idx-1-copy", 100L); + assertDocCount("test-idx-2-copy", 100L); + assertIndexDirectoryDoesNotExist("test-idx-1-copy", "test-idx-2-copy"); + } + + /** + * Picks a shard out of the cluster state for each given index and asserts + * that the 'index' directory does not exist in the node's file system. + * This assertion is digging a bit into the implementation details to + * verify that the Lucene segment files are not copied from the snapshot + * repository to the node's local disk for a remote snapshot index. + */ + private void assertIndexDirectoryDoesNotExist(String... 
indexNames) { + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (String indexName : indexNames) { + final Index index = state.metadata().index(indexName).getIndex(); + // Get the primary shards for the given index + final GroupShardsIterator shardIterators = state.getRoutingTable() + .activePrimaryShardsGrouped(new String[] { indexName }, false); + // Randomly pick one of the shards + final List iterators = iterableAsArrayList(shardIterators); + final ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); + final ShardRouting shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertTrue(shardRouting.primary()); + assertTrue(shardRouting.assignedToNode()); + // Get the file system stats for the assigned node + final String nodeId = shardRouting.currentNodeId(); + final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats(nodeId).addMetric(FS.metricName()).get(); + for (FsInfo.Path info : nodeStats.getNodes().get(0).getFs()) { + // Build the expected path for the index data for a "normal" + // index and assert it does not exist + final String path = info.getPath(); + final Path file = PathUtils.get(path) + .resolve("indices") + .resolve(index.getUUID()) + .resolve(Integer.toString(shardRouting.getId())) + .resolve("index"); + MatcherAssert.assertThat("Expect file not to exist: " + file, Files.exists(file), is(false)); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 1b673217a248b..3ecf5ab19c0e4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.restore; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; @@ -42,6 +43,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; @@ -68,6 +70,38 @@ public class RestoreSnapshotRequest extends ClusterManagerNodeRequest source) { } else { throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); } + } else if (name.equals("storage_type")) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT)) { + if (entry.getValue() instanceof String) { + storageType(StorageType.fromString((String) entry.getValue())); + } else { + throw new IllegalArgumentException("malformed storage_type"); + } + } else { + throw new IllegalArgumentException( + "Unsupported parameter " + name + ". 
Feature flag is not enabled for this experimental feature" + ); + } } else { if (IndicesOptions.isIndicesOptions(name) == false) { throw new IllegalArgumentException("Unknown parameter " + name); @@ -579,6 +648,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(ignoreIndexSetting); } builder.endArray(); + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && storageType != null) { + storageType.toXContent(builder); + } builder.endObject(); return builder; } @@ -605,7 +677,8 @@ public boolean equals(Object o) { && Objects.equals(renameReplacement, that.renameReplacement) && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) - && Objects.equals(snapshotUuid, that.snapshotUuid); + && Objects.equals(snapshotUuid, that.snapshotUuid) + && Objects.equals(storageType, that.storageType); } @Override @@ -621,7 +694,8 @@ public int hashCode() { partial, includeAliases, indexSettings, - snapshotUuid + snapshotUuid, + storageType ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 68397851699fb..0104637a00035 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -248,4 +248,12 @@ public RestoreSnapshotRequestBuilder setIgnoreIndexSettings(List ignoreI request.ignoreIndexSettings(ignoreIndexSettings); return this; } + + /** + * Sets the storage type + */ + public RestoreSnapshotRequestBuilder setStorageType(RestoreSnapshotRequest.StorageType storageType) { + request.storageType(storageType); + return this; + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 539773296ed74..728bf9d1ae90e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -38,6 +38,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; @@ -257,12 +258,24 @@ public static class SnapshotRecoverySource extends RecoverySource { private final Snapshot snapshot; private final IndexId index; private final Version version; + private final boolean isSearchableSnapshot; public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { + this(restoreUUID, snapshot, version, indexId, false); + } + + public SnapshotRecoverySource( + String restoreUUID, + Snapshot snapshot, + Version version, + IndexId indexId, + boolean isSearchableSnapshot + ) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); this.index = Objects.requireNonNull(indexId); + this.isSearchableSnapshot = isSearchableSnapshot; } 
SnapshotRecoverySource(StreamInput in) throws IOException { @@ -274,6 +287,11 @@ public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version ver } else { index = new IndexId(in.readString(), IndexMetadata.INDEX_UUID_NA_VALUE); } + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + isSearchableSnapshot = in.readBoolean(); + } else { + isSearchableSnapshot = false; + } } public String restoreUUID() { @@ -298,6 +316,10 @@ public Version version() { return version; } + public boolean isSearchableSnapshot() { + return isSearchableSnapshot; + } + @Override protected void writeAdditionalFields(StreamOutput out) throws IOException { out.writeString(restoreUUID); @@ -308,6 +330,9 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { } else { out.writeString(index.getName()); } + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(isSearchableSnapshot); + } } @Override @@ -321,7 +346,8 @@ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params param .field("snapshot", snapshot.getSnapshotId().getName()) .field("version", version.toString()) .field("index", index.getName()) - .field("restoreUUID", restoreUUID); + .field("restoreUUID", restoreUUID) + .field("isSearchableSnapshot", isSearchableSnapshot); } @Override @@ -342,12 +368,13 @@ public boolean equals(Object o) { return restoreUUID.equals(that.restoreUUID) && snapshot.equals(that.snapshot) && index.equals(that.index) - && version.equals(that.version); + && version.equals(that.version) + && isSearchableSnapshot == that.isSearchableSnapshot; } @Override public int hashCode() { - return Objects.hash(restoreUUID, snapshot, index, version); + return Objects.hash(restoreUUID, snapshot, index, version, isSearchableSnapshot); } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 7be9adc786f24..079fc38415328 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -221,12 +221,19 @@ public final class IndexScopedSettings extends AbstractScopedSettings { */ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), + List.of(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - Arrays.asList( + List.of( IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING + ), + FeatureFlags.SEARCHABLE_SNAPSHOT, + List.of( + IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY, + IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID, + IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME, + IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID ) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index fa39dc9ac5aa0..7297479776da9 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -29,6 +29,14 @@ public class FeatureFlags { */ public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; + /** + * Gates 
the functionality of a new parameter to the snapshot restore API + * that allows for creation of a new index type that searches a snapshot + * directly in a remote repository without restoring all index data to disk + * ahead of time. + */ + public static final String SEARCHABLE_SNAPSHOT = "opensearch.experimental.feature.searchable_snapshot.enabled"; + /** * Used to test feature flags whose values are expected to be booleans. * This method returns true if the value is "true" (case-insensitive), diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 5b79ca5970e63..fb114bff9aa12 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -159,7 +159,8 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { nodeEnv.availableShardPaths(request.shardId) ); if (shardStateMetadata != null) { - if (indicesService.getShardOrNull(shardId) == null) { + if (indicesService.getShardOrNull(shardId) == null + && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { final String customDataPath; if (request.getCustomDataPath() != null) { customDataPath = request.getCustomDataPath(); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index e52a2ba39ed52..2a462f6165184 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,12 +70,14 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; +import org.opensearch.index.store.RemoteSnapshotDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -94,6 +96,7 @@ import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; /** * IndexModule represents the central extension point for index level custom implementations like: @@ -390,15 +393,6 @@ IndexEventListener freeze() { // pkg private for testing } } - public static boolean isBuiltinType(String storeType) { - for (Type type : Type.values()) { - if (type.match(storeType)) { - return true; - } - } - return false; - } - /** * Type of file system * @@ -409,7 +403,8 @@ public enum Type { NIOFS("niofs"), MMAPFS("mmapfs"), SIMPLEFS("simplefs"), - FS("fs"); + FS("fs"), + REMOTE_SNAPSHOT("remote_snapshot"); private final String settingsKey; private final boolean deprecated; @@ -426,7 +421,7 @@ public enum Type { private static final Map TYPES; static { - final Map types = new HashMap<>(4); + final Map types = new HashMap<>(values().length); for (final Type type : values()) { types.put(type.settingsKey, type); } @@ 
-441,6 +436,10 @@ public boolean isDeprecated() { return deprecated; } + static boolean containsSettingsKey(String key) { + return TYPES.containsKey(key); + } + public static Type fromSettingsKey(final String key) { final Type type = TYPES.get(key); if (type == null) { @@ -459,6 +458,13 @@ public boolean match(String setting) { return getSettingsKey().equals(setting); } + /** + * Convenience method to check whether the given IndexSettings contains + * an {@link #INDEX_STORE_TYPE_SETTING} set to the value of this type. + */ + public boolean match(IndexSettings settings) { + return match(INDEX_STORE_TYPE_SETTING.get(settings.getSettings())); + } } public static Type defaultStoreType(final boolean allowMmap) { @@ -562,7 +568,7 @@ private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) { type = defaultStoreType(allowMmap); } else { - if (isBuiltinType(storeType)) { + if (Type.containsSettingsKey(storeType)) { type = Type.fromSettingsKey(storeType); } else { type = null; @@ -572,7 +578,7 @@ private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( throw new IllegalArgumentException("store type [" + storeType + "] is not allowed because mmap is disabled"); } final IndexStorePlugin.DirectoryFactory factory; - if (storeType.isEmpty() || isBuiltinType(storeType)) { + if (storeType.isEmpty()) { factory = DEFAULT_DIRECTORY_FACTORY; } else { factory = indexStoreFactories.get(storeType); @@ -641,4 +647,26 @@ private void ensureNotFrozen() { } } + public static Map createBuiltInDirectoryFactories( + Supplier repositoriesService + ) { + final Map factories = new HashMap<>(); + for (Type type : Type.values()) { + switch (type) { + case HYBRIDFS: + case NIOFS: + case FS: + case MMAPFS: + case SIMPLEFS: + factories.put(type.getSettingsKey(), DEFAULT_DIRECTORY_FACTORY); + break; + case REMOTE_SNAPSHOT: + factories.put(type.getSettingsKey(), new RemoteSnapshotDirectoryFactory(repositoriesService)); + break; + default: + throw new IllegalStateException("No directory factory mapping for built-in type " + type); + } + } + return factories; + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 00daea147f16f..7648f0a192ce7 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -550,6 +550,30 @@ public final class IndexSettings { Property.Dynamic ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( + "index.searchable_snapshot.repository", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting SEARCHABLE_SNAPSHOT_ID_UUID = Setting.simpleString( + "index.searchable_snapshot.snapshot_id.uuid", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting SEARCHABLE_SNAPSHOT_ID_NAME = Setting.simpleString( + "index.searchable_snapshot.snapshot_id.name", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting SEARCHABLE_SNAPSHOT_INDEX_ID = Setting.simpleString( + "index.searchable_snapshot.index.id", + Property.IndexScope, + Property.InternalIndex + ); + private final Index index; private final Version version; private final Logger logger; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 52ecc5bc66607..3a3c4b19a02f6 100644 --- 
a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -678,7 +678,7 @@ public void onFailure(Exception e) { this.shardRouting = newRouting; assert this.shardRouting.primary() == false || this.shardRouting.started() == false || // note that we use started and not - // active to avoid relocating shards + // active to avoid relocating shards this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning this.replicationTracker.isPrimaryMode() : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting; @@ -1990,7 +1990,12 @@ public void openEngineAndRecoverFromTranslog() throws IOException { translogRecoveryStats::incrementRecoveredOperations ); }; - loadGlobalCheckpointToReplicationTracker(); + + // Do not load the global checkpoint if this is a remote snapshot index + if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings) == false) { + loadGlobalCheckpointToReplicationTracker(); + } + innerOpenEngineAndTranslog(replicationTracker); getEngine().translogManager() .recoverFromTranslog(translogRecoveryRunner, getEngine().getProcessedLocalCheckpoint(), Long.MAX_VALUE); @@ -3089,13 +3094,18 @@ public void startRecovery( } break; case SNAPSHOT: - final String repo = ((SnapshotRecoverySource) recoveryState.getRecoverySource()).snapshot().getRepository(); - executeRecovery( - "from snapshot", - recoveryState, - recoveryListener, - l -> restoreFromRepository(repositoriesService.repository(repo), l) - ); + final SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource(); + if (recoverySource.isSearchableSnapshot()) { + executeRecovery("from snapshot (remote)", recoveryState, recoveryListener, this::recoverFromStore); + } else { + final String repo = recoverySource.snapshot().getRepository(); + executeRecovery( + "from snapshot", + recoveryState, + recoveryListener, + l -> restoreFromRepository(repositoriesService.repository(repo), l) + ); + } break; case LOCAL_SHARDS: final IndexMetadata indexMetadata = indexSettings().getIndexMetadata(); @@ -3256,10 +3266,15 @@ private static void persistMetadata( writeReason = "routing changed from " + currentRouting + " to " + newRouting; } logger.trace("{} writing shard state, reason [{}]", shardId, writeReason); + + final ShardStateMetadata.IndexDataLocation indexDataLocation = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.exists( + indexSettings.getSettings() + ) ? 
ShardStateMetadata.IndexDataLocation.REMOTE : ShardStateMetadata.IndexDataLocation.LOCAL; final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata( newRouting.primary(), indexSettings.getUUID(), - newRouting.allocationId() + newRouting.allocationId(), + indexDataLocation ); ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardPath.getShardStatePath()); } else { diff --git a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java index ccc620fc8cf64..c7e380f842fa0 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -484,7 +484,8 @@ private void newAllocationId(ShardPath shardPath, Terminal terminal) throws IOEx final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata( shardStateMetadata.primary, shardStateMetadata.indexUUID, - newAllocationId + newAllocationId, + ShardStateMetadata.IndexDataLocation.LOCAL ); ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardStatePath); diff --git a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java index 9cd9149cda913..9e334bc6ffd54 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java @@ -56,17 +56,39 @@ public final class ShardStateMetadata { private static final String PRIMARY_KEY = "primary"; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String ALLOCATION_ID_KEY = "allocation_id"; + private static final String INDEX_DATA_LOCATION_KEY = "index_data_location"; + + /** + * Enumeration of types of data locations for an index + */ + public enum IndexDataLocation { + /** + * Indicates index data is on the local disk + */ + LOCAL, + /** + * Indicates index data is remote, such as for a searchable snapshot + * index + */ + REMOTE + } public final String indexUUID; public final boolean primary; @Nullable public final AllocationId allocationId; // can be null if we read from legacy format (see fromXContent and MultiDataPathUpgrader) + public final IndexDataLocation indexDataLocation; public ShardStateMetadata(boolean primary, String indexUUID, AllocationId allocationId) { + this(primary, indexUUID, allocationId, IndexDataLocation.LOCAL); + } + + public ShardStateMetadata(boolean primary, String indexUUID, AllocationId allocationId, IndexDataLocation indexDataLocation) { assert indexUUID != null; this.primary = primary; this.indexUUID = indexUUID; this.allocationId = allocationId; + this.indexDataLocation = Objects.requireNonNull(indexDataLocation); } @Override @@ -89,6 +111,9 @@ public boolean equals(Object o) { if (Objects.equals(allocationId, that.allocationId) == false) { return false; } + if (Objects.equals(indexDataLocation, that.indexDataLocation) == false) { + return false; + } return true; } @@ -98,17 +123,16 @@ public int hashCode() { int result = indexUUID.hashCode(); result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0); result = 31 * result + (primary ? 
1 : 0); + result = 31 * result + indexDataLocation.hashCode(); return result; } @Override public String toString() { - return "primary [" + primary + "], allocation [" + allocationId + "]"; + return "primary [" + primary + "], allocation [" + allocationId + "], index data location [" + indexDataLocation + "]"; } - public static final MetadataStateFormat FORMAT = new MetadataStateFormat( - SHARD_STATE_FILE_PREFIX - ) { + public static final MetadataStateFormat FORMAT = new MetadataStateFormat<>(SHARD_STATE_FILE_PREFIX) { @Override protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { @@ -124,6 +148,11 @@ public void toXContent(XContentBuilder builder, ShardStateMetadata shardStateMet if (shardStateMetadata.allocationId != null) { builder.field(ALLOCATION_ID_KEY, shardStateMetadata.allocationId); } + // Omit the index data location field if it is LOCAL (the implicit default) + // to maintain compatibility for local indices + if (shardStateMetadata.indexDataLocation != IndexDataLocation.LOCAL) { + builder.field(INDEX_DATA_LOCATION_KEY, shardStateMetadata.indexDataLocation); + } } @Override @@ -136,6 +165,7 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException String currentFieldName = null; String indexUUID = IndexMetadata.INDEX_UUID_NA_VALUE; AllocationId allocationId = null; + IndexDataLocation indexDataLocation = IndexDataLocation.LOCAL; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -144,6 +174,13 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException primary = parser.booleanValue(); } else if (INDEX_UUID_KEY.equals(currentFieldName)) { indexUUID = parser.text(); + } else if (INDEX_DATA_LOCATION_KEY.equals(currentFieldName)) { + final String stringValue = parser.text(); + try { + indexDataLocation = IndexDataLocation.valueOf(stringValue); + } catch (IllegalArgumentException e) { + throw new CorruptStateException("unexpected value for data location [" + stringValue + "]"); + } } else { throw new CorruptStateException("unexpected field in shard state [" + currentFieldName + "]"); } @@ -160,7 +197,7 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException if (primary == null) { throw new CorruptStateException("missing value for [primary] in shard state"); } - return new ShardStateMetadata(primary, indexUUID, allocationId); + return new ShardStateMetadata(primary, indexUUID, allocationId, indexDataLocation); } }; } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 06916c4cc87fe..6ca5036808818 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -104,9 +104,6 @@ final class StoreRecovery { */ void recoverFromStore(final IndexShard indexShard, ActionListener listener) { if (canRecover(indexShard)) { - RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); - assert recoveryType == RecoverySource.Type.EMPTY_STORE || recoveryType == RecoverySource.Type.EXISTING_STORE - : "expected store recovery type but was: " + recoveryType; ActionListener.completeWith(recoveryListener(indexShard, listener), () -> { logger.debug("starting recovery from store ..."); internalRecoverFromStore(indexShard); diff --git 
a/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java b/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java new file mode 100644 index 0000000000000..0757d88a4099a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java @@ -0,0 +1,169 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.store; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.NoLockFactory; +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotId; + +/** + * Trivial in-memory implementation of a Directory that reads from a snapshot + * in a repository. This is functional but only temporary, serving to + * demonstrate searchable snapshot functionality end to end. The proper + * implementation will follow per https://github.com/opensearch-project/OpenSearch/issues/3114. + * + * @opensearch.internal + */ +public final class InMemoryRemoteSnapshotDirectory extends Directory { + + private final BlobStoreRepository blobStoreRepository; + private final SnapshotId snapshotId; + private final BlobPath blobPath; + private final SetOnce<BlobContainer> blobContainer = new SetOnce<>(); + private final SetOnce<Map<String, BlobStoreIndexShardSnapshot.FileInfo>> fileInfoMap = new SetOnce<>(); + + public InMemoryRemoteSnapshotDirectory(BlobStoreRepository blobStoreRepository, BlobPath blobPath, SnapshotId snapshotId) { + this.blobStoreRepository = blobStoreRepository; + this.snapshotId = snapshotId; + this.blobPath = blobPath; + } + + @Override + public String[] listAll() throws IOException { + return fileInfoMap().keySet().toArray(new String[0]); + } + + @Override + public void deleteFile(String name) throws IOException {} + + @Override + public IndexOutput createOutput(String name, IOContext context) { + return NoopIndexOutput.INSTANCE; + } + + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfoMap().get(name); + + // Virtual files are contained entirely in the metadata hash field + if (fileInfo.name().startsWith("v__")) { + return new ByteArrayIndexInput(name, fileInfo.metadata().hash().bytes); + } + + try (InputStream is = blobContainer().readBlob(fileInfo.name())) { + return new ByteArrayIndexInput(name, is.readAllBytes()); + } + } + + @Override + public void close() throws IOException {} + + @Override + public long fileLength(String name) throws IOException { + initializeIfNecessary(); + return fileInfoMap.get().get(name).length(); + } + + @Override + public Set<String> getPendingDeletions() throws IOException { + return Collections.emptySet(); + } + + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new
UnsupportedOperationException(); + } + + @Override + public void sync(Collection<String> names) throws IOException {} + + @Override + public void syncMetaData() {} + + @Override + public void rename(String source, String dest) throws IOException {} + + @Override + public Lock obtainLock(String name) throws IOException { + return NoLockFactory.INSTANCE.obtainLock(null, null); + } + + static class NoopIndexOutput extends IndexOutput { + + final static NoopIndexOutput INSTANCE = new NoopIndexOutput(); + + NoopIndexOutput() { + super("noop", "noop"); + } + + @Override + public void close() throws IOException { + + } + + @Override + public long getFilePointer() { + return 0; + } + + @Override + public long getChecksum() throws IOException { + return 0; + } + + @Override + public void writeByte(byte b) throws IOException { + + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + + } + } + + private BlobContainer blobContainer() { + initializeIfNecessary(); + return blobContainer.get(); + } + + private Map<String, BlobStoreIndexShardSnapshot.FileInfo> fileInfoMap() { + initializeIfNecessary(); + return fileInfoMap.get(); + } + + /** + * Bit of a hack to lazily initialize the blob store to avoid running afoul + * of the assertion in {@code BlobStoreRepository#assertSnapshotOrGenericThread}. + */ + private void initializeIfNecessary() { + if (blobContainer.get() == null || fileInfoMap.get() == null) { + blobContainer.set(blobStoreRepository.blobStore().blobContainer(blobPath)); + final BlobStoreIndexShardSnapshot snapshot = blobStoreRepository.loadShardSnapshot(blobContainer.get(), snapshotId); + fileInfoMap.set( + snapshot.indexFiles().stream().collect(Collectors.toMap(BlobStoreIndexShardSnapshot.FileInfo::physicalName, f -> f)) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java new file mode 100644 index 0000000000000..bf7806b836b65 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java @@ -0,0 +1,49 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.store; + +import java.io.IOException; +import java.util.function.Supplier; + +import org.apache.lucene.store.Directory; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotId; + +/** + * Factory for a Directory implementation that can read directly from index + * data stored remotely in a repository.
+ * + * @opensearch.internal + */ +public final class RemoteSnapshotDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + private final Supplier repositoriesService; + + public RemoteSnapshotDirectoryFactory(Supplier repositoriesService) { + this.repositoriesService = repositoriesService; + } + + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + final String repositoryName = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.get(indexSettings.getSettings()); + final Repository repository = repositoriesService.get().repository(repositoryName); + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + final BlobPath blobPath = new BlobPath().add("indices") + .add(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings())) + .add(Integer.toString(shardPath.getShardId().getId())); + final SnapshotId snapshotId = new SnapshotId( + IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.get(indexSettings.getSettings()), + IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.get(indexSettings.getSettings()) + ); + return new InMemoryRemoteSnapshotDirectory(blobStoreRepository, blobPath, snapshotId); + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index f2961c0f3b13d..b2f48ccdd389c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -109,6 +109,7 @@ import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NoOpEngine; +import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.flush.FlushStats; import org.opensearch.index.get.GetStats; @@ -130,6 +131,7 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.translog.TranslogStats; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -336,13 +338,6 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; - // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : directoryFactories.keySet()) { - if (IndexModule.isBuiltinType(indexStoreType)) { - throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); - } - } - this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; // doClose() is called when shutting down a node, yet there might still be ongoing requests @@ -769,6 +764,9 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isSegRepEnabled()) { return new NRTReplicationEngineFactory(); } + if (IndexModule.Type.REMOTE_SNAPSHOT.match(idxSettings)) { + return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); + } return 
new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 24300c884d194..504550378e14f 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -38,6 +38,7 @@ import org.apache.lucene.util.SetOnce; import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexingPressureService; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; @@ -213,6 +214,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -604,11 +606,23 @@ protected Node( .map(plugin -> (Function>) plugin::getEngineFactory) .collect(Collectors.toList()); - final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + final Map builtInDirectoryFactories = IndexModule.createBuiltInDirectoryFactories( + repositoriesServiceReference::get + ); + final Map directoryFactories = new HashMap<>(); + pluginsService.filterPlugins(IndexStorePlugin.class) .stream() .map(IndexStorePlugin::getDirectoryFactories) .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + .forEach((k, v) -> { + // do not allow any plugin-provided index store type to conflict with a built-in type + if (builtInDirectoryFactories.containsKey(k)) { + throw new IllegalStateException("registered index store type [" + k + "] conflicts with a built-in type"); + } + directoryFactories.put(k, v); + }); + directoryFactories.putAll(builtInDirectoryFactories); final Map recoveryStateFactories = pluginsService.filterPlugins( IndexStorePlugin.class @@ -653,7 +667,7 @@ protected Node( client, metaStateService, engineFactoryProviders, - indexStoreFactories, + Map.copyOf(directoryFactories), searchModule.getValuesSourceRegistry(), recoveryStateFactories, remoteDirectoryFactory diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index ca5078c4d1c56..60c01d0b04639 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -84,7 +84,9 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -421,20 +423,30 @@ public ClusterState execute(ClusterState currentState) { .getMaxNodeVersion() .minimumIndexCompatibilityVersion(); for (Map.Entry indexEntry : indices.entrySet()) { + String renamedIndexName = indexEntry.getKey(); String index = indexEntry.getValue(); boolean partial = checkPartial(index); - SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( + + IndexMetadata 
snapshotIndexMetadata = updateIndexSettings( + metadata.index(index), + request.indexSettings(), + request.ignoreIndexSettings() + ); + final boolean isSearchableSnapshot = FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) + && IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(request.storageType().toString()); + if (isSearchableSnapshot) { + snapshotIndexMetadata = addSnapshotToIndexSettings( + snapshotIndexMetadata, + snapshot, + repositoryData.resolveIndexId(index) + ); + } + final SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( restoreUUID, snapshot, snapshotInfo.version(), - repositoryData.resolveIndexId(index) - ); - String renamedIndexName = indexEntry.getKey(); - IndexMetadata snapshotIndexMetadata = metadata.index(index); - snapshotIndexMetadata = updateIndexSettings( - snapshotIndexMetadata, - request.indexSettings(), - request.ignoreIndexSettings() + repositoryData.resolveIndexId(index), + isSearchableSnapshot ); try { snapshotIndexMetadata = metadataIndexUpgradeService.upgradeIndexMetadata( @@ -1207,4 +1219,16 @@ public void applyClusterState(ClusterChangedEvent event) { logger.warn("Failed to update restore state ", t); } } + + private static IndexMetadata addSnapshotToIndexSettings(IndexMetadata metadata, Snapshot snapshot, IndexId indexId) { + final Settings newSettings = Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.getKey(), snapshot.getRepository()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey(), snapshot.getSnapshotId().getUUID()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), snapshot.getSnapshotId().getName()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.getKey(), indexId.getId()) + .put(metadata.getSettings()) + .build(); + return IndexMetadata.builder(metadata).settings(newSettings).build(); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 27c0437236f63..470eeea771f2b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -327,7 +327,8 @@ public void testShardStateMetaHashCodeEquals() { ShardStateMetadata meta = new ShardStateMetadata( randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), - allocationId + allocationId, + randomFrom(ShardStateMetadata.IndexDataLocation.values()) ); assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId)); @@ -339,7 +340,12 @@ public void testShardStateMetaHashCodeEquals() { Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode allocationId = randomBoolean() ? 
null : randomAllocationId(); - meta = new ShardStateMetadata(randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); + meta = new ShardStateMetadata( + randomBoolean(), + randomRealisticUnicodeOfCodepointLengthBetween(1, 10), + allocationId, + randomFrom(ShardStateMetadata.IndexDataLocation.values()) + ); hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java index beda468b45fb0..25ec7c7987855 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java @@ -35,6 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.WriteStateException; import org.opensearch.index.Index; import org.opensearch.test.OpenSearchTestCase; @@ -50,7 +51,7 @@ public void testLoadShardPath() throws IOException { ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, "0xDEADBEEF", AllocationId.newInitializing()), path); + writeShardStateMetadata("0xDEADBEEF", path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, ""); assertEquals(path, shardPath.getDataPath()); assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); @@ -66,7 +67,7 @@ public void testFailLoadShardPathOnMultiState() throws IOException { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing()), paths); + writeShardStateMetadata(indexUUID, paths); Exception e = expectThrows(IllegalStateException.class, () -> ShardPath.loadShardPath(logger, env, shardId, "")); assertThat(e.getMessage(), containsString("more than one shard state found")); } @@ -77,7 +78,7 @@ public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { ShardId shardId = new ShardId("foo", "foobar", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, "0xDEADBEEF", AllocationId.newInitializing()), path); + writeShardStateMetadata("0xDEADBEEF", path); Exception e = expectThrows(IllegalStateException.class, () -> ShardPath.loadShardPath(logger, env, shardId, "")); assertThat(e.getMessage(), containsString("expected: foobar on shard path")); } @@ -121,7 +122,7 @@ public void testGetRootPaths() throws IOException { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing()), path); + writeShardStateMetadata(indexUUID, path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, customDataPath); boolean found = false; for (Path p : env.nodeDataPaths()) { @@ -148,4 +149,10 @@ public void testGetRootPaths() throws IOException { } } + private static void writeShardStateMetadata(String 
indexUUID, Path... paths) throws WriteStateException { + ShardStateMetadata.FORMAT.writeAndCleanup( + new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing(), ShardStateMetadata.IndexDataLocation.LOCAL), + paths + ); + } } diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index cf8d6677b4227..ce40de0e9aa71 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -57,6 +57,8 @@ import java.util.Arrays; import java.util.Locale; +import static org.opensearch.test.store.MockFSDirectoryFactory.FILE_SYSTEM_BASED_STORE_TYPES; + public class FsDirectoryFactoryTests extends OpenSearchTestCase { public void testPreload() throws IOException { @@ -170,7 +172,7 @@ public void testStoreDirectory() throws IOException { // default doTestStoreDirectory(tempDir, null, IndexModule.Type.FS); // explicit directory impls - for (IndexModule.Type type : IndexModule.Type.values()) { + for (IndexModule.Type type : FILE_SYSTEM_BASED_STORE_TYPES) { doTestStoreDirectory(tempDir, type.name().toLowerCase(Locale.ROOT), type); } } diff --git a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java index 47952af1cd06c..e38b62c419334 100644 --- a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java @@ -63,10 +63,15 @@ import java.io.PrintStream; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.List; import java.util.Random; import java.util.Set; +import java.util.stream.Collectors; public class MockFSDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + public static final List FILE_SYSTEM_BASED_STORE_TYPES = Arrays.stream(IndexModule.Type.values()) + .filter(t -> (t == IndexModule.Type.REMOTE_SNAPSHOT) == false) + .collect(Collectors.toUnmodifiableList()); public static final Setting RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting( "index.store.mock.random.io_exception_rate_on_open", @@ -168,7 +173,7 @@ private Directory randomDirectoryService(Random random, IndexSettings indexSetti .put(indexSettings.getIndexMetadata().getSettings()) .put( IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), - RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey() + RandomPicks.randomFrom(random, FILE_SYSTEM_BASED_STORE_TYPES).getSettingsKey() ) ) .build(); From 89550c0a9123ab808779449839d6f63560d0412d Mon Sep 17 00:00:00 2001 From: pranikum <109206473+pranikum@users.noreply.github.com> Date: Thu, 13 Oct 2022 17:24:38 +0530 Subject: [PATCH 21/39] Recommission api level support (#4604) * Add changes for Recommission API Signed-off-by: pranikum <109206473+pranikum@users.noreply.github.com> --- CHANGELOG.md | 2 +- .../client/RestHighLevelClientTests.java | 3 +- ...cluster.delete_decommission_awareness.json | 19 ++++ .../org/opensearch/action/ActionModule.java | 5 ++ .../delete/DeleteDecommissionStateAction.java | 25 ++++++ .../DeleteDecommissionStateRequest.java | 40 +++++++++ ...DeleteDecommissionStateRequestBuilder.java | 27 ++++++ .../DeleteDecommissionStateResponse.java | 36 ++++++++ ...ransportDeleteDecommissionStateAction.java | 86 +++++++++++++++++++ 
.../awareness/delete/package-info.java | 10 +++ .../opensearch/client/ClusterAdminClient.java | 17 ++++ .../java/org/opensearch/client/Requests.java | 8 ++ .../client/support/AbstractClient.java | 21 +++++ .../decommission/DecommissionService.java | 8 +- .../RestDeleteDecommissionStateAction.java | 52 +++++++++++ .../DeleteDecommissionStateRequestTests.java | 32 +++++++ .../DeleteDecommissionStateResponseTests.java | 29 +++++++ .../DecommissionServiceTests.java | 10 +-- ...estDeleteDecommissionStateActionTests.java | 40 +++++++++ 19 files changed, 459 insertions(+), 11 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java create mode 100644 server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 4310103803fbe..346bf432e663b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) - +- Add REST layer support for recommissioning a zone
([#4604](https://github.com/opensearch-project/OpenSearch/pull/4604)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 9086fc9563c57..ab9982657a410 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -890,7 +890,8 @@ public void testApiNamingConventions() throws Exception { "cluster.put_weighted_routing", "cluster.get_weighted_routing", "cluster.put_decommission_awareness", - "cluster.get_decommission_awareness", }; + "cluster.get_decommission_awareness", + "cluster.delete_decommission_awareness", }; List<String> booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); Set<String> deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json new file mode 100644 index 0000000000000..13ea101169e60 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json @@ -0,0 +1,19 @@ +{ + "cluster.delete_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Delete any existing decommission." + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/", + "methods": [ + "DELETE" + ] + } + ] + } + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index d959d6828a46b..c2c11d69f134c 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -42,6 +42,8 @@ import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.TransportDeleteDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; @@ -310,6 +312,7 @@ import org.opensearch.rest.action.admin.cluster.RestClusterStatsAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteDecommissionStateAction; import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; import
org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; @@ -699,6 +702,7 @@ public void reg // Decommission actions actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class); actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class); + actions.register(DeleteDecommissionStateAction.INSTANCE, TransportDeleteDecommissionStateAction.class); return unmodifiableMap(actions.getRegistry()); } @@ -879,6 +883,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestDeletePitAction()); registerHandler.accept(new RestGetAllPitsAction(nodesInCluster)); registerHandler.accept(new RestPitSegmentsAction(nodesInCluster)); + registerHandler.accept(new RestDeleteDecommissionStateAction()); for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..3aff666d388be --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionType; + +/** + * Delete decommission state action. + * + * @opensearch.internal + */ +public class DeleteDecommissionStateAction extends ActionType { + public static final DeleteDecommissionStateAction INSTANCE = new DeleteDecommissionStateAction(); + public static final String NAME = "cluster:admin/decommission/awareness/delete"; + + private DeleteDecommissionStateAction() { + super(NAME, DeleteDecommissionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java new file mode 100644 index 0000000000000..205be54a36c33 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for deleting decommission request. 
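+ * + * A minimal usage sketch (illustrative only, assuming a connected {@code client} and an {@code ActionListener} named {@code listener}): + * {@code client.admin().cluster().deleteDecommissionState(new DeleteDecommissionStateRequest(), listener);}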
+ * + * @opensearch.internal + */ +public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest { + + public DeleteDecommissionStateRequest() {} + + public DeleteDecommissionStateRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java new file mode 100644 index 0000000000000..08f194c53f18e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +/** + * Builder for Delete decommission request. + * + * @opensearch.internal + */ +public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + DeleteDecommissionStateRequest, + DeleteDecommissionStateResponse, + DeleteDecommissionStateRequestBuilder> { + + public DeleteDecommissionStateRequestBuilder(OpenSearchClient client, DeleteDecommissionStateAction action) { + super(client, action, new DeleteDecommissionStateRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java new file mode 100644 index 0000000000000..2ff634966586a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response returned after deletion of decommission request. 
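+ * + * Extends {@code AcknowledgedResponse}, so the outcome can be checked with, for example, + * {@code boolean cleared = response.isAcknowledged();}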
+ * + * @opensearch.internal + */ +public class DeleteDecommissionStateResponse extends AcknowledgedResponse { + + public DeleteDecommissionStateResponse(StreamInput in) throws IOException { + super(in); + } + + public DeleteDecommissionStateResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..7d8f4bdd8304c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionService; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for delete decommission. 
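+ * + * Registered as {@code cluster:admin/decommission/awareness/delete}; the cluster-manager operation delegates to + * {@code DecommissionService#startRecommissionAction(ActionListener)}, as shown below.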
+ * + * @opensearch.internal + */ +public class TransportDeleteDecommissionStateAction extends TransportClusterManagerNodeAction< + DeleteDecommissionStateRequest, + DeleteDecommissionStateResponse> { + + private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionStateAction.class); + private final DecommissionService decommissionService; + + @Inject + public TransportDeleteDecommissionStateAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DecommissionService decommissionService + ) { + super( + DeleteDecommissionStateAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteDecommissionStateRequest::new, + indexNameExpressionResolver + ); + this.decommissionService = decommissionService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected DeleteDecommissionStateResponse read(StreamInput in) throws IOException { + return new DeleteDecommissionStateResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteDecommissionStateRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + DeleteDecommissionStateRequest request, + ClusterState state, + ActionListener listener + ) { + logger.info("Received delete decommission Request [{}]", request); + this.decommissionService.startRecommissionAction(listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java new file mode 100644 index 0000000000000..c2cfc03baa45e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Delete decommission transport handlers. 
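+ * Holds the action, request, request builder, response and transport action that back the delete decommission awareness API.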
*/ +package org.opensearch.action.admin.cluster.decommission.awareness.delete; diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index c811b788d9cf6..5551c45ba9e57 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -37,6 +37,9 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; @@ -864,4 +867,18 @@ public interface ClusterAdminClient extends OpenSearchClient { */ GetDecommissionStateRequestBuilder prepareGetDecommission(); + /** + * Deletes the decommission metadata. + */ + ActionFuture deleteDecommissionState(DeleteDecommissionStateRequest request); + + /** + * Deletes the decommission metadata. + */ + void deleteDecommissionState(DeleteDecommissionStateRequest request, ActionListener listener); + + /** + * Deletes the decommission metadata. + */ + DeleteDecommissionStateRequestBuilder prepareDeleteDecommissionRequest(); } diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index c8729a0d498f4..87219dda49825 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -32,6 +32,7 @@ package org.opensearch.client; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -588,4 +589,11 @@ public static DecommissionRequest decommissionRequest() { public static GetDecommissionStateRequest getDecommissionStateRequest() { return new GetDecommissionStateRequest(); } + + /** + * Creates a new delete decommission request. 
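+ * + * Usage sketch (illustrative): {@code DeleteDecommissionStateRequest request = Requests.deleteDecommissionStateRequest();} + * + * @return a new delete decommission state request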
+ */ + public static DeleteDecommissionStateRequest deleteDecommissionStateRequest() { + return new DeleteDecommissionStateRequest(); + } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 65199b22e8e72..bd82299725435 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -43,6 +43,10 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; @@ -1405,6 +1409,23 @@ public GetDecommissionStateRequestBuilder prepareGetDecommission() { return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); } + @Override + public ActionFuture deleteDecommissionState(DeleteDecommissionStateRequest request) { + return execute(DeleteDecommissionStateAction.INSTANCE, request); + } + + @Override + public void deleteDecommissionState( + DeleteDecommissionStateRequest request, + ActionListener listener + ) { + execute(DeleteDecommissionStateAction.INSTANCE, request, listener); + } + + @Override + public DeleteDecommissionStateRequestBuilder prepareDeleteDecommissionRequest() { + return new DeleteDecommissionStateRequestBuilder(this, DeleteDecommissionStateAction.INSTANCE); + } } static class IndicesAdmin implements IndicesAdminClient { diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index 5def2733b5ded..b2c8bfbc0cdc8 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -13,8 +13,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -479,7 +479,7 @@ public void onFailure(Exception e) { }; } - public void startRecommissionAction(final ActionListener listener) { + public void startRecommissionAction(final ActionListener listener) { /* * For abandoned requests, we 
might not really know if it actually restored the exclusion list. * And can land up in cases where even after recommission, exclusions are set(which is unexpected). @@ -502,7 +502,7 @@ public void onFailure(Exception e) { }, false); } - void deleteDecommissionState(ActionListener listener) { + void deleteDecommissionState(ActionListener listener) { clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { @@ -523,7 +523,7 @@ public void onFailure(String source, Exception e) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { // Cluster state processed for deleting the decommission attribute. assert newState.metadata().decommissionAttributeMetadata() == null; - listener.onResponse(new AcknowledgedResponse(true)); + listener.onResponse(new DeleteDecommissionStateResponse(true)); } }); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..9fd7ae2248c30 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Clears the decommission metadata. 
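+ * + * Bound to {@code DELETE /_cluster/decommission/awareness} (see {@code routes()}); an illustrative invocation, assuming a local node on port 9200: + * {@code curl -X DELETE "localhost:9200/_cluster/decommission/awareness"}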
+ * + * @opensearch.api + */ +public class RestDeleteDecommissionStateAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(DELETE, "/_cluster/decommission/awareness")); + } + + @Override + public String getName() { + return "delete_decommission_state_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteDecommissionStateRequest deleteDecommissionStateRequest = createRequest(); + return channel -> client.admin() + .cluster() + .deleteDecommissionState(deleteDecommissionStateRequest, new RestToXContentListener<>(channel)); + } + + DeleteDecommissionStateRequest createRequest() { + return Requests.deleteDecommissionStateRequest(); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java new file mode 100644 index 0000000000000..1a95b77cc1024 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteDecommissionStateRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + final DeleteDecommissionStateRequest originalRequest = new DeleteDecommissionStateRequest(); + + final DeleteDecommissionStateRequest cloneRequest; + try (BytesStreamOutput out = new BytesStreamOutput()) { + originalRequest.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + cloneRequest = new DeleteDecommissionStateRequest(in); + } + } + assertEquals(cloneRequest.clusterManagerNodeTimeout(), originalRequest.clusterManagerNodeTimeout()); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java new file mode 100644 index 0000000000000..085eda3e9d0e7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteDecommissionStateResponseTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + final DeleteDecommissionStateResponse originalResponse = new DeleteDecommissionStateResponse(true); + + final DeleteDecommissionStateResponse deserialized = copyWriteable( + originalResponse, + writableRegistry(), + DeleteDecommissionStateResponse::new + ); + assertEquals(deserialized, originalResponse); + + } +} diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 840ce1634a68e..7dee51b7713f9 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -15,9 +15,9 @@ import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -217,9 +217,9 @@ public void testClearClusterDecommissionState() throws InterruptedException { .metadata(Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build()) .build(); - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse decommissionResponse) { + public void onResponse(DeleteDecommissionStateResponse decommissionResponse) { DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); assertNull(metadata); countDownLatch.countDown(); @@ -268,9 +268,9 @@ public void testDeleteDecommissionAttributeClearVotingExclusion() { public void testClusterUpdateTaskForDeletingDecommission() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); - ActionListener listener = new ActionListener<>() { + ActionListener listener = new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse response) { + public void onResponse(DeleteDecommissionStateResponse response) { assertTrue(response.isAcknowledged()); assertNull(clusterService.state().metadata().decommissionAttributeMetadata()); countDownLatch.countDown(); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java new file mode 100644 index 0000000000000..01f988efdf6eb --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch 
Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.junit.Before; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.RestActionTestCase; + +import java.util.List; + +public class RestDeleteDecommissionStateActionTests extends RestActionTestCase { + + private RestDeleteDecommissionStateAction action; + + @Before + public void setupAction() { + action = new RestDeleteDecommissionStateAction(); + controller().registerHandler(action); + } + + public void testRoutes() { + List routes = action.routes(); + RestHandler.Route route = routes.get(0); + assertEquals(route.getMethod(), RestRequest.Method.DELETE); + assertEquals("/_cluster/decommission/awareness", route.getPath()); + } + + public void testCreateRequest() { + DeleteDecommissionStateRequest request = action.createRequest(); + assertNotNull(request); + } +} From 1a7aabaf6c28e9e633090673ba4c55a1cdd904b5 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 13 Oct 2022 10:03:26 -0700 Subject: [PATCH 22/39] Fix assertion in test of ShardStateMetadata (#4774) This test randomly fails if the enum selected isn't the same as the one chosen by the legacy constructor. Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../index/shard/IndexShardTests.java | 25 ++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 346bf432e663b..95aa3e10a39fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -128,6 +128,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) +- Fixed randomly failing test ([#4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 470eeea771f2b..72b77bb706065 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -331,12 +331,26 @@ public void testShardStateMetaHashCodeEquals() { randomFrom(ShardStateMetadata.IndexDataLocation.values()) ); - assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId)); - assertEquals(meta.hashCode(), new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId).hashCode()); + assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId, meta.indexDataLocation)); + assertEquals( + meta.hashCode(), + new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId, meta.indexDataLocation).hashCode() + ); -
assertFalse(meta.equals(new ShardStateMetadata(!meta.primary, meta.indexUUID, meta.allocationId))); - assertFalse(meta.equals(new ShardStateMetadata(!meta.primary, meta.indexUUID + "foo", meta.allocationId))); - assertFalse(meta.equals(new ShardStateMetadata(!meta.primary, meta.indexUUID + "foo", randomAllocationId()))); + assertNotEquals(meta, new ShardStateMetadata(!meta.primary, meta.indexUUID, meta.allocationId, meta.indexDataLocation)); + assertNotEquals(meta, new ShardStateMetadata(!meta.primary, meta.indexUUID + "foo", meta.allocationId, meta.indexDataLocation)); + assertNotEquals(meta, new ShardStateMetadata(!meta.primary, meta.indexUUID, randomAllocationId(), meta.indexDataLocation)); + assertNotEquals( + meta, + new ShardStateMetadata( + !meta.primary, + meta.indexUUID, + randomAllocationId(), + meta.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL + ? ShardStateMetadata.IndexDataLocation.REMOTE + : ShardStateMetadata.IndexDataLocation.LOCAL + ) + ); Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode allocationId = randomBoolean() ? null : randomAllocationId(); @@ -349,7 +363,6 @@ public void testShardStateMetaHashCodeEquals() { hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); - } public void testClosesPreventsNewOperations() throws Exception { From 18f1fa36a7722391c88db80e41ded2027271f6d9 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Thu, 13 Oct 2022 23:42:16 +0530 Subject: [PATCH 23/39] Delete API for weighted round robin search routing (#4400) * Delete API for weighted round robin search routing Signed-off-by: Anshu Agarwal --- CHANGELOG.md | 1 + .../client/RestHighLevelClientTests.java | 1 + .../api/cluster.delete_weighted_routing.json | 19 +++ .../cluster/routing/WeightedRoutingIT.java | 64 +++++++ .../search/SearchWeightedRoutingIT.java | 161 ++++++++++++++++++ .../org/opensearch/action/ActionModule.java | 6 + .../ClusterDeleteWeightedRoutingAction.java | 25 +++ .../ClusterDeleteWeightedRoutingRequest.java | 44 +++++ ...erDeleteWeightedRoutingRequestBuilder.java | 27 +++ .../ClusterDeleteWeightedRoutingResponse.java | 37 ++++ .../TransportDeleteWeightedRoutingAction.java | 86 ++++++++++ .../routing/weighted/delete/package-info.java | 10 ++ .../opensearch/client/ClusterAdminClient.java | 18 ++ .../java/org/opensearch/client/Requests.java | 10 ++ .../client/support/AbstractClient.java | 22 +++ .../routing/WeightedRoutingService.java | 30 ++++ ...estClusterDeleteWeightedRoutingAction.java | 53 ++++++ .../routing/WeightedRoutingServiceTests.java | 28 +++ 18 files changed, 642 insertions(+) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json create mode 100644 server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 95aa3e10a39fd..8e580b2b101af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - GET api for weighted shard routing([#4275](https://github.com/opensearch-project/OpenSearch/pull/4275/)) - Unmute test RelocationIT.testRelocationWhileIndexingRandom ([#4580](https://github.com/opensearch-project/OpenSearch/pull/4580)) - Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084)) +- Delete api for weighted shard routing([#4400](https://github.com/opensearch-project/OpenSearch/pull/4400/)) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index ab9982657a410..d522bf8c4d005 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -889,6 +889,7 @@ public void testApiNamingConventions() throws Exception { "remote_store.restore", "cluster.put_weighted_routing", "cluster.get_weighted_routing", + "cluster.delete_weighted_routing", "cluster.put_decommission_awareness", "cluster.get_decommission_awareness", "cluster.delete_decommission_awareness", }; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json new file mode 100644 index 0000000000000..2cd4081b645e8 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json @@ -0,0 +1,19 @@ +{ + "cluster.delete_weighted_routing": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/delete", + "description": "Delete weighted shard routing weights" + }, + "stability": "stable", + "url": { + "paths": [ + { + "path": "/_cluster/routing/awareness/weights", + "methods": [ + "DELETE" + ] + } + ] + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 61f82877bf12b..bba07d878a42c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.routing; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; import org.opensearch.common.settings.Settings; @@ -284,4 +285,67 @@ public void testWeightedRoutingMetadataOnOSProcessRestart() throws Exception { ensureGreen(); assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); } + + public void testDeleteWeightedRouting_WeightsNotSet() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + assertNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // delete weighted routing metadata + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().get(); + assertTrue(deleteResponse.isAcknowledged()); + assertNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } + + public void testDeleteWeightedRouting_WeightsAreSet() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // delete weighted routing metadata + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().get(); + assertTrue(deleteResponse.isAcknowledged()); + 
assertNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java new file mode 100644 index 0000000000000..097775b7ab4ac --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.junit.Assert; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.search.stats.SearchStats; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3) +public class SearchWeightedRoutingIT extends OpenSearchIntegTestCase { + @Override + protected int numberOfReplicas() { + return 2; + } + + public void testSearchWithWRRShardRouting() throws IOException { + Settings commonSettings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone" + ".values", "a,b,c") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .build(); + + logger.info("--> starting 6 nodes on different zones"); + List nodes = internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + String A_0 = nodes.get(0); + String B_0 = nodes.get(1); + String B_1 = nodes.get(2); + String A_1 = nodes.get(3); + String C_0 = nodes.get(4); + String C_1 = nodes.get(5); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = 
client().admin().cluster().prepareHealth().setWaitForNodes("6").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 10).put("index.number_of_replicas", 2)) + ); + ensureGreen(); + logger.info("--> creating indices for test"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + } + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + Set hitNodes = new HashSet<>(); + // making search requests + for (int i = 0; i < 50; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(A_0, A_1, B_0, B_1)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { + hitNodes.add(searchResponse.getHits().getAt(j).getShard().getNodeId()); + } + } + // search should not go to nodes in zone c + assertThat(hitNodes.size(), lessThanOrEqualTo(4)); + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + List nodeIdsFromZoneWithWeightZero = new ArrayList<>(); + for (DiscoveryNode node : dataNodes) { + if (node.getAttributes().get("zone").equals("c")) { + nodeIdsFromZoneWithWeightZero.add(node.getId()); + } + } + for (String nodeId : nodeIdsFromZoneWithWeightZero) { + assertFalse(hitNodes.contains(nodeId)); + } + + NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + for (NodeStats stat : nodeStats.getNodes()) { + SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + if (stat.getNode().getAttributes().get("zone").equals("c")) { + assertEquals(0, searchStats.getQueryCount()); + assertEquals(0, searchStats.getFetchCount()); + + } else { + Assert.assertTrue(searchStats.getQueryCount() > 0L); + Assert.assertTrue(searchStats.getFetchCount() > 0L); + } + } + + logger.info("--> deleted shard routing weights for weighted round robin"); + + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().get(); + assertEquals(deleteResponse.isAcknowledged(), true); + + hitNodes = new HashSet<>(); + // making search requests + for (int i = 0; i < 100; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(A_0, A_1, B_0, B_1)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { + hitNodes.add(searchResponse.getHits().getAt(j).getShard().getNodeId()); + } + } + + // Check shard routing requests hit data nodes in zone c + for (String nodeId : nodeIdsFromZoneWithWeightZero) { + assertTrue(hitNodes.contains(nodeId)); + } + nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + for (NodeStats stat : nodeStats.getNodes()) { + SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + Assert.assertTrue(searchStats.getQueryCount() > 0L); +
Assert.assertTrue(searchStats.getFetchCount() > 0L); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index c2c11d69f134c..84bc9b395c5dc 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -85,6 +85,8 @@ import org.opensearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.opensearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.TransportDeleteWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.TransportGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; @@ -302,6 +304,7 @@ import org.opensearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction; import org.opensearch.rest.action.admin.cluster.RestCloneSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestClusterAllocationExplainAction; +import org.opensearch.rest.action.admin.cluster.RestClusterDeleteWeightedRoutingAction; import org.opensearch.rest.action.admin.cluster.RestClusterGetSettingsAction; import org.opensearch.rest.action.admin.cluster.RestClusterGetWeightedRoutingAction; import org.opensearch.rest.action.admin.cluster.RestClusterHealthAction; @@ -580,6 +583,7 @@ public void reg actions.register(ClusterAddWeightedRoutingAction.INSTANCE, TransportAddWeightedRoutingAction.class); actions.register(ClusterGetWeightedRoutingAction.INSTANCE, TransportGetWeightedRoutingAction.class); + actions.register(ClusterDeleteWeightedRoutingAction.INSTANCE, TransportDeleteWeightedRoutingAction.class); actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); @@ -766,8 +770,10 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestCloseIndexAction()); registerHandler.accept(new RestOpenIndexAction()); registerHandler.accept(new RestAddIndexBlockAction()); + registerHandler.accept(new RestClusterPutWeightedRoutingAction()); registerHandler.accept(new RestClusterGetWeightedRoutingAction()); + registerHandler.accept(new RestClusterDeleteWeightedRoutingAction()); registerHandler.accept(new RestUpdateSettingsAction()); registerHandler.accept(new RestGetSettingsAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java new file mode 100644 index 0000000000000..aa438cd31b934 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this 
file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.ActionType; + +/** + * Action to delete weights for weighted round-robin shard routing policy. + * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingAction extends ActionType { + public static final ClusterDeleteWeightedRoutingAction INSTANCE = new ClusterDeleteWeightedRoutingAction(); + public static final String NAME = "cluster:admin/routing/awareness/weights/delete"; + + private ClusterDeleteWeightedRoutingAction() { + super(NAME, ClusterDeleteWeightedRoutingResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java new file mode 100644 index 0000000000000..71eab8ff35a2d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request to delete weights for weighted round-robin shard routing policy. + * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingRequest extends ClusterManagerNodeRequest { + public ClusterDeleteWeightedRoutingRequest() {} + + public ClusterDeleteWeightedRoutingRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + + @Override + public String toString() { + return "ClusterDeleteWeightedRoutingRequest"; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java new file mode 100644 index 0000000000000..19976ac6b07aa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +/** + * Request builder to delete weights for weighted round-robin shard routing policy. 
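+ * + * Typically obtained via the cluster admin client; a minimal sketch (illustrative): + * {@code ClusterDeleteWeightedRoutingResponse response = client.admin().cluster().prepareDeleteWeightedRouting().get();}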
+ * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + ClusterDeleteWeightedRoutingRequest, + ClusterDeleteWeightedRoutingResponse, + ClusterDeleteWeightedRoutingRequestBuilder> { + + public ClusterDeleteWeightedRoutingRequestBuilder(OpenSearchClient client, ClusterDeleteWeightedRoutingAction action) { + super(client, action, new ClusterDeleteWeightedRoutingRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java new file mode 100644 index 0000000000000..b98ac6c0c55be --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response from deleting weights for weighted round-robin search routing policy. + * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingResponse extends AcknowledgedResponse { + + ClusterDeleteWeightedRoutingResponse(StreamInput in) throws IOException { + super(in); + } + + public ClusterDeleteWeightedRoutingResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java new file mode 100644 index 0000000000000..8f88d8af71b70 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.WeightedRoutingService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for deleting weights for weighted round-robin search routing policy + * + * @opensearch.internal + */ +public class TransportDeleteWeightedRoutingAction extends TransportClusterManagerNodeAction< + ClusterDeleteWeightedRoutingRequest, + ClusterDeleteWeightedRoutingResponse> { + + private static final Logger logger = LogManager.getLogger(TransportDeleteWeightedRoutingAction.class); + + private final WeightedRoutingService weightedRoutingService; + + @Inject + public TransportDeleteWeightedRoutingAction( + TransportService transportService, + ClusterService clusterService, + WeightedRoutingService weightedRoutingService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + ClusterDeleteWeightedRoutingAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ClusterDeleteWeightedRoutingRequest::new, + indexNameExpressionResolver + ); + this.weightedRoutingService = weightedRoutingService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterDeleteWeightedRoutingResponse read(StreamInput in) throws IOException { + return new ClusterDeleteWeightedRoutingResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(ClusterDeleteWeightedRoutingRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + ClusterDeleteWeightedRoutingRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + weightedRoutingService.deleteWeightedRoutingMetadata(request, listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java new file mode 100644 index 0000000000000..d24c88ec674f3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Delete weighted round-robin shard routing weights.
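+ * Contains the action, request, request builder, response and transport action behind the {@code DELETE /_cluster/routing/awareness/weights} API.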
diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java
index 5551c45ba9e57..77ddb5e17c742 100644
--- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java
+++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java
@@ -95,6 +95,9 @@
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder;
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequestBuilder;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse;
@@ -837,6 +840,21 @@ public interface ClusterAdminClient extends OpenSearchClient {
      */
     ClusterGetWeightedRoutingRequestBuilder prepareGetWeightedRouting();
 
+    /**
+     * Deletes weights for weighted round-robin search routing policy.
+     */
+    ActionFuture<ClusterDeleteWeightedRoutingResponse> deleteWeightedRouting(ClusterDeleteWeightedRoutingRequest request);
+
+    /**
+     * Deletes weights for weighted round-robin search routing policy.
+     */
+    void deleteWeightedRouting(ClusterDeleteWeightedRoutingRequest request, ActionListener<ClusterDeleteWeightedRoutingResponse> listener);
+
+    /**
+     * Deletes weights for weighted round-robin search routing policy.
+     */
+    ClusterDeleteWeightedRoutingRequestBuilder prepareDeleteWeightedRouting();
+
     /**
      * Decommission awareness attribute
      */
diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java
index 87219dda49825..21f2a2d906602 100644
--- a/server/src/main/java/org/opensearch/client/Requests.java
+++ b/server/src/main/java/org/opensearch/client/Requests.java
@@ -52,6 +52,7 @@
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
 import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
 import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
@@ -572,6 +573,15 @@ public static ClusterGetWeightedRoutingRequest getWeightedRoutingRequest(String
         return new ClusterGetWeightedRoutingRequest(attributeName);
     }
 
+    /**
+     * Deletes weights for weighted round-robin search routing policy
+     *
+     * @return delete weight request
+     */
+    public static ClusterDeleteWeightedRoutingRequest deleteWeightedRoutingRequest() {
+        return new ClusterDeleteWeightedRoutingRequest();
+    }
+
     /**
      * Creates a new decommission request.
      *
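Reviewer note: a hedged usage sketch of the client surface added above. The `client` variable stands in for any OpenSearch `Client` and is illustrative only, not part of this change:

    // Blocking call through the request builder added to ClusterAdminClient
    ClusterDeleteWeightedRoutingResponse response = client.admin()
        .cluster()
        .prepareDeleteWeightedRouting()
        .get();
    assert response.isAcknowledged();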
diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
index bd82299725435..b42010d4253d5 100644
--- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java
+++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
@@ -122,6 +122,10 @@
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder;
 import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingAction;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequestBuilder;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder;
@@ -1328,6 +1332,24 @@ public ClusterGetWeightedRoutingRequestBuilder prepareGetWeightedRouting() {
         return new ClusterGetWeightedRoutingRequestBuilder(this, ClusterGetWeightedRoutingAction.INSTANCE);
     }
 
+    @Override
+    public ActionFuture<ClusterDeleteWeightedRoutingResponse> deleteWeightedRouting(ClusterDeleteWeightedRoutingRequest request) {
+        return execute(ClusterDeleteWeightedRoutingAction.INSTANCE, request);
+    }
+
+    @Override
+    public void deleteWeightedRouting(
+        ClusterDeleteWeightedRoutingRequest request,
+        ActionListener<ClusterDeleteWeightedRoutingResponse> listener
+    ) {
+        execute(ClusterDeleteWeightedRoutingAction.INSTANCE, request, listener);
+    }
+
+    @Override
+    public ClusterDeleteWeightedRoutingRequestBuilder prepareDeleteWeightedRouting() {
+        return new ClusterDeleteWeightedRoutingRequestBuilder(this, ClusterDeleteWeightedRoutingAction.INSTANCE);
+    }
+
     @Override
     public void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener<AcknowledgedResponse> listener) {
         execute(DeleteDanglingIndexAction.INSTANCE, request, listener);
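Reviewer note: the async variant goes through the same execute() path; a minimal non-blocking sketch is shown below. The `client` and `logger` names are illustrative assumptions:

    // Non-blocking call using the Requests factory method added above
    client.admin().cluster().deleteWeightedRouting(
        Requests.deleteWeightedRoutingRequest(),
        ActionListener.wrap(
            response -> logger.info("weighted routing weights deleted, acknowledged={}", response.isAcknowledged()),
            e -> logger.error("failed to delete weighted routing weights", e)
        )
    );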
diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java
index 7ff2b23630a3c..54f2c81aea384 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java
@@ -12,7 +12,9 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.ActionListener;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
@@ -106,6 +108,34 @@ private boolean checkIfSameWeightsInMetadata(
         return newWeightedRoutingMetadata.getWeightedRouting().equals(oldWeightedRoutingMetadata.getWeightedRouting());
     }
 
+    public void deleteWeightedRoutingMetadata(
+        final ClusterDeleteWeightedRoutingRequest request,
+        final ActionListener<ClusterDeleteWeightedRoutingResponse> listener
+    ) {
+        clusterService.submitStateUpdateTask("delete_weighted_routing", new ClusterStateUpdateTask(Priority.URGENT) {
+            @Override
+            public ClusterState execute(ClusterState currentState) {
+                logger.info("Deleting weighted routing metadata from the cluster state");
+                Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata());
+                mdBuilder.removeCustom(WeightedRoutingMetadata.TYPE);
+                return ClusterState.builder(currentState).metadata(mdBuilder).build();
+            }
+
+            @Override
+            public void onFailure(String source, Exception e) {
+                logger.error("failed to remove weighted routing metadata from cluster state", e);
+                listener.onFailure(e);
+            }
+
+            @Override
+            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                logger.debug("cluster weighted routing metadata change is processed by all the nodes");
+                assert newState.metadata().weightedRoutingMetadata() == null;
+                listener.onResponse(new ClusterDeleteWeightedRoutingResponse(true));
+            }
+        });
+    }
+
     List<String> getAwarenessAttributes() {
         return awarenessAttributes;
     }
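Reviewer note: the REST handler added below exposes this service method at DELETE /_cluster/routing/awareness/weights, so the end-to-end flow can be exercised with a plain HTTP call. Host and port are placeholders:

    curl -XDELETE "localhost:9200/_cluster/routing/awareness/weights"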
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java
new file mode 100644
index 0000000000000..9742cc373d520
--- /dev/null
+++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.rest.action.admin.cluster;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
+import org.opensearch.client.Requests;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static java.util.Collections.singletonList;
+import static org.opensearch.rest.RestRequest.Method.DELETE;
+
+/**
+ * Delete Weighted Round Robin based shard routing weights
+ *
+ * @opensearch.api
+ *
+ */
+public class RestClusterDeleteWeightedRoutingAction extends BaseRestHandler {
+
+    private static final Logger logger = LogManager.getLogger(RestClusterDeleteWeightedRoutingAction.class);
+
+    @Override
+    public List<Route> routes() {
+        return singletonList(new Route(DELETE, "/_cluster/routing/awareness/weights"));
+    }
+
+    @Override
+    public String getName() {
+        return "delete_weighted_routing_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        ClusterDeleteWeightedRoutingRequest clusterDeleteWeightedRoutingRequest = Requests.deleteWeightedRoutingRequest();
+        return channel -> client.admin()
+            .cluster()
+            .deleteWeightedRouting(clusterDeleteWeightedRoutingRequest, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java
index 557c5c7ac910d..fc5d46ef84c79 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java
@@ -12,7 +12,9 @@
 import org.junit.Before;
 import org.opensearch.Version;
 import org.opensearch.action.ActionListener;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder;
 import org.opensearch.client.node.NodeClient;
@@ -233,6 +235,32 @@ public void onFailure(Exception e) {
         assertTrue(countDownLatch.await(30, TimeUnit.SECONDS));
     }
 
+    public void testDeleteWeightedRoutingMetadata() throws InterruptedException {
+        Map<String, Double> weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0);
+        ClusterState state = clusterService.state();
+        state = setWeightedRoutingWeights(state, weights);
+        ClusterState.Builder builder = ClusterState.builder(state);
+        ClusterServiceUtils.setState(clusterService, builder);
+
+        ClusterDeleteWeightedRoutingRequest clusterDeleteWeightedRoutingRequest = new ClusterDeleteWeightedRoutingRequest();
+        final CountDownLatch countDownLatch = new CountDownLatch(1);
+        ActionListener<ClusterDeleteWeightedRoutingResponse> listener = new ActionListener<ClusterDeleteWeightedRoutingResponse>() {
+            @Override
+            public void onResponse(ClusterDeleteWeightedRoutingResponse clusterDeleteWeightedRoutingResponse) {
+                assertTrue(clusterDeleteWeightedRoutingResponse.isAcknowledged());
+
assertNull(clusterService.state().metadata().weightedRoutingMetadata()); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("on failure shouldn't have been called"); + } + }; + weightedRoutingService.deleteWeightedRoutingMetadata(clusterDeleteWeightedRoutingRequest, listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + public void testVerifyAwarenessAttribute_InvalidAttributeName() { assertThrows( "invalid awareness attribute %s requested for updating weighted routing", From 12f26d3d10c413aae6b1abffbe384169fcaea0f7 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 13 Oct 2022 16:36:59 -0400 Subject: [PATCH 24/39] Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) (#4779) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + .../tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + .../repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + .../repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - 14 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e580b2b101af..f3d6c2e05553e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) +- Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, 
commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a779389b3ca82..08784c82a4cc4 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -10,7 +10,7 @@ bundled_jdk = 17.0.4+8 spatial4j = 0.7 jts = 1.15.0 jackson = 2.13.4 -jackson_databind = 2.13.4 +jackson_databind = 2.13.4.2 snakeyaml = 1.32 icu4j = 70.1 supercsv = 2.4.0 diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file From 6bae823cefad7373a748ce833ce75d80f48b380b Mon Sep 17 00:00:00 2001 From: Ayush Kataria <31301636+ayushKataria@users.noreply.github.com> Date: Fri, 14 Oct 2022 04:30:50 +0530 Subject: [PATCH 25/39] =?UTF-8?q?Use=20ReplicationFailedException=20instea?= =?UTF-8?q?d=20of=20OpensearchException=20in=20Repl=E2=80=A6=20(#4725)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use ReplicationFailedException instead of OpensearchException in ReplicationTarget Signed-off-by: Ayush Kataria * CHANGELOG.md updated Signed-off-by: Ayush Kataria * test fixes Signed-off-by: Ayush Kataria * spotless fix Signed-off-by: Ayush Kataria * spotless fix Signed-off-by: Ayush Kataria * fixes for failing test as suggested in PR comments Signed-off-by: Ayush Kataria Signed-off-by: Ayush Kataria --- CHANGELOG.md | 1 + .../indices/recovery/RecoveryFailedException.java | 4 ++-- .../indices/recovery/RecoveryListener.java | 4 ++-- .../opensearch/indices/recovery/RecoveryTarget.java | 8 ++++---- .../replication/SegmentReplicationTarget.java | 9 +++------ .../SegmentReplicationTargetService.java | 13 +++++++------ .../replication/common/ReplicationCollection.java | 8 +++----- .../common/ReplicationFailedException.java | 12 ++++++++++++ .../replication/common/ReplicationListener.java | 4 +--- .../replication/common/ReplicationTarget.java | 7 +++---- .../shard/SegmentReplicationIndexShardTests.java | 5 ++--- .../opensearch/indices/recovery/RecoveryTests.java | 4 ++-- .../SegmentReplicationTargetServiceTests.java | 5 +++-- .../replication/SegmentReplicationTargetTests.java | 12 ++++++------ .../recovery/ReplicationCollectionTests.java | 5 +++-- .../opensearch/index/shard/IndexShardTestCase.java | 10 +++++++--- 16 files changed, 61 insertions(+), 50 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3d6c2e05553e..3a8bee55d881d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ Inspired from [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) +- Use ReplicationFailedException instead of OpensearchException in ReplicationTarget ([#4725](https://github.com/opensearch-project/OpenSearch/pull/4725)) - Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java index f2665e8bbb1a7..12393ab12c95d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java @@ -32,11 +32,11 @@ package org.opensearch.indices.recovery; -import org.opensearch.OpenSearchException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationFailedException; import java.io.IOException; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class RecoveryFailedException extends OpenSearchException { +public class RecoveryFailedException extends ReplicationFailedException { public RecoveryFailedException(StartRecoveryRequest request, Throwable cause) { this(request, null, cause); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java index b93c054ffa4bf..c8c2fbfc896eb 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java @@ -8,9 +8,9 @@ package org.opensearch.indices.recovery; -import org.opensearch.OpenSearchException; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; @@ -49,7 +49,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { indicesClusterStateService.handleRecoveryFailure(shardRouting, sendShardFailure, e); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 7acc6b8b54fdd..c1e29e0d866d8 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ 
b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; @@ -56,10 +55,11 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTarget; -import org.opensearch.indices.replication.common.ReplicationListener; -import org.opensearch.indices.replication.common.ReplicationCollection; import java.io.IOException; import java.nio.channels.FileChannel; @@ -135,7 +135,7 @@ public String description() { } @Override - public void notifyListener(OpenSearchException e, boolean sendShardFailure) { + public void notifyListener(ReplicationFailedException e, boolean sendShardFailure) { listener.onFailure(state(), new RecoveryFailedException(state(), e.getMessage(), e), sendShardFailure); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 26bec2203c599..c12d9b7165ae0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -18,7 +18,6 @@ import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; @@ -105,16 +104,14 @@ public String description() { } @Override - public void notifyListener(OpenSearchException e, boolean sendShardFailure) { + public void notifyListener(ReplicationFailedException e, boolean sendShardFailure) { // Cancellations still are passed to our SegmentReplicationListner as failures, if we have failed because of cancellation // update the stage. final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); if (cancelledException != null) { state.setStage(SegmentReplicationState.Stage.CANCELLED); - listener.onFailure(state(), (CancellableThreads.ExecutionCancelledException) cancelledException, sendShardFailure); - } else { - listener.onFailure(state(), e, sendShardFailure); } + listener.onFailure(state(), e, sendShardFailure); } @Override @@ -150,7 +147,7 @@ public void startReplication(ActionListener listener) { // SegmentReplicationSource does not share CancellableThreads. 
final CancellableThreads.ExecutionCancelledException executionCancelledException = new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); - notifyListener(executionCancelledException, false); + notifyListener(new ReplicationFailedException("Segment replication failed", executionCancelledException), false); throw executionCancelledException; }); state.setStage(SegmentReplicationState.Stage.REPLICATING); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 8fc53ccd3bc08..b633f0fa3b9a0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; @@ -27,6 +26,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.tasks.Task; @@ -196,7 +196,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { logger.trace( () -> new ParameterizedMessage( "[shardId {}] [replication id {}] Replication failed, timing data: {}", @@ -249,13 +249,13 @@ default void onDone(ReplicationState state) { } @Override - default void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + default void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { onReplicationFailure((SegmentReplicationState) state, e, sendShardFailure); } void onReplicationDone(SegmentReplicationState state); - void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure); + void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure); } /** @@ -293,13 +293,14 @@ public void onFailure(Exception e) { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { if (onGoingReplications.getTarget(replicationId) != null) { + IndexShard indexShard = onGoingReplications.getTarget(replicationId).indexShard(); // if the target still exists in our collection, the primary initiated the cancellation, fail the replication // but do not fail the shard. Cancellations initiated by this node from Index events will be removed with // onGoingReplications.cancel and not appear in the collection when this listener resolves. 
- onGoingReplications.fail(replicationId, (CancellableThreads.ExecutionCancelledException) cause, false); + onGoingReplications.fail(replicationId, new ReplicationFailedException(indexShard, cause), false); } } else { - onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + onGoingReplications.fail(replicationId, new ReplicationFailedException("Segment Replication failed", e), true); } } }); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java index 20600856c9444..e918ac0a79691 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchException; -import org.opensearch.OpenSearchTimeoutException; import org.opensearch.common.concurrent.AutoCloseableRefCounted; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -134,7 +132,7 @@ public T reset(final long id, final TimeValue activityTimeout) { } catch (Exception e) { // fail shard to be safe assert oldTarget != null; - oldTarget.notifyListener(new OpenSearchException("Unable to reset target", e), true); + oldTarget.notifyListener(new ReplicationFailedException("Unable to reset target", e), true); return null; } } @@ -187,7 +185,7 @@ public boolean cancel(long id, String reason) { * @param e exception with reason for the failure * @param sendShardFailure true a shard failed message should be sent to the master */ - public void fail(long id, OpenSearchException e, boolean sendShardFailure) { + public void fail(long id, ReplicationFailedException e, boolean sendShardFailure) { T removed = onGoingTargetEvents.remove(id); if (removed != null) { logger.trace("failing {}. 
Send shard failure: [{}]", removed.description(), sendShardFailure); @@ -299,7 +297,7 @@ protected void doRun() throws Exception { String message = "no activity after [" + checkInterval + "]"; fail( id, - new OpenSearchTimeoutException(message), + new ReplicationFailedException(message), true // to be safe, we don't know what go stuck ); return; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java index afdd0ce466f9b..23ad4d0e096b5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java @@ -38,4 +38,16 @@ public ReplicationFailedException(ShardId shardId, @Nullable String extraInfo, T public ReplicationFailedException(StreamInput in) throws IOException { super(in); } + + public ReplicationFailedException(Exception e) { + super(e); + } + + public ReplicationFailedException(String msg) { + super(msg); + } + + public ReplicationFailedException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java index 0666f475d496a..1d3c48d084f60 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java @@ -8,8 +8,6 @@ package org.opensearch.indices.replication.common; -import org.opensearch.OpenSearchException; - /** * Interface for listeners that run when there's a change in {@link ReplicationState} * @@ -19,5 +17,5 @@ public interface ReplicationListener { void onDone(ReplicationState state); - void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure); + void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 42f4572fef3e4..1e2dcbb46256f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.common.CheckedFunction; @@ -78,7 +77,7 @@ public CancellableThreads cancellableThreads() { return cancellableThreads; } - public abstract void notifyListener(OpenSearchException e, boolean sendShardFailure); + public abstract void notifyListener(ReplicationFailedException e, boolean sendShardFailure); public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIndex stateIndex, ReplicationListener listener) { super(name); @@ -170,7 +169,7 @@ public void cancel(String reason) { * @param e exception that encapsulates the failure * @param sendShardFailure indicates whether to notify the master of the shard failure */ - public void fail(OpenSearchException e, boolean 
sendShardFailure) { + public void fail(ReplicationFailedException e, boolean sendShardFailure) { if (finished.compareAndSet(false, true)) { try { notifyListener(e, sendShardFailure); @@ -187,7 +186,7 @@ public void fail(OpenSearchException e, boolean sendShardFailure) { protected void ensureRefCount() { if (refCount() <= 0) { - throw new OpenSearchException( + throw new ReplicationFailedException( "ReplicationTarget is used but it's refcount is 0. Probably a mismatch between incRef/decRef calls" ); } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index da04ea1b9914b..b3cbd9b00f0b1 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.junit.Assert; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; @@ -44,6 +43,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -670,8 +670,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertTrue(e instanceof CancellableThreads.ExecutionCancelledException); + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertFalse(sendShardFailure); assertEquals(SegmentReplicationState.Stage.CANCELLED, state.getStage()); latch.countDown(); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index cc5100fba9010..3d04f808bc30c 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.bulk.BulkShardRequest; @@ -70,6 +69,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; @@ -471,7 +471,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, 
OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); } })) diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 7437cb22e44d1..9a9c5b989dea9 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -22,6 +22,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; @@ -104,7 +105,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { logger.error("Unexpected error", e); Assert.fail("Test should succeed"); } @@ -149,7 +150,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { // failures leave state object in last entered stage. 
assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); assertEquals(expectedError, e.getCause()); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 34ca94bba02f3..ffea4aaf6b7c4 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -27,7 +27,6 @@ import org.junit.Assert; import org.mockito.Mockito; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -40,6 +39,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.StoreTests; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; @@ -199,7 +199,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -242,7 +242,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -287,7 +287,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -332,7 +332,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -374,7 +374,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assert (e instanceof IllegalStateException); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 1789dd3b2a288..75ac1075e8ee0 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -39,6 +39,7 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import 
org.opensearch.indices.recovery.RecoveryState; @@ -59,7 +60,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { } }; @@ -93,7 +94,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { failed.set(true); latch.countDown(); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 1fcdfd79c544e..f520206e0f866 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -39,7 +39,6 @@ import org.apache.lucene.store.IndexInput; import org.junit.Assert; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; @@ -119,6 +118,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; @@ -181,7 +181,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { throw new AssertionError(e); } }; @@ -1282,7 +1282,11 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { logger.error("Unexpected replication failure in test", e); Assert.fail("test replication should not fail: " + e); } From 5d7d83e5852ab38095ea02a1248ea40b4416cd4b Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 13 Oct 2022 16:48:50 -0700 Subject: [PATCH 26/39] Update version check after backport (#4786) Now that the searchable snapshot API change has been backported to 2.x, the version guard has to change from 3.0 to 2.4. 
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../cluster/snapshots/restore/RestoreSnapshotRequest.java | 4 ++-- .../java/org/opensearch/cluster/routing/RecoverySource.java | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a8bee55d881d..69fb6b42fe29f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -132,6 +132,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) +- Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 3ecf5ab19c0e4..5fd83244f3dea 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -152,7 +152,7 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { snapshotUuid = in.readOptionalString(); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_2_4_0)) { storageType = in.readEnum(StorageType.class); } } @@ -182,7 +182,7 @@ public void writeTo(StreamOutput out) throws IOException { "restricting the snapshot UUID is forbidden in a cluster with version [" + out.getVersion() + "] nodes" ); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_2_4_0)) { out.writeEnum(storageType); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 728bf9d1ae90e..3e4feb02686d6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -287,7 +287,7 @@ public SnapshotRecoverySource( } else { index = new IndexId(in.readString(), IndexMetadata.INDEX_UUID_NA_VALUE); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_2_4_0)) { isSearchableSnapshot = in.readBoolean(); } else { isSearchableSnapshot = false; @@ -330,7 +330,7 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { } else { out.writeString(index.getName()); } - if 
(FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_2_4_0)) { out.writeBoolean(isSearchableSnapshot); } } From d70886091042c12978acbecced80736c4f0284a9 Mon Sep 17 00:00:00 2001 From: Ketan Verma Date: Fri, 14 Oct 2022 15:50:54 +0530 Subject: [PATCH 27/39] Added in-flight cancellation of SearchShardTask based on resource consumption (#4575) * Added in-flight cancellation of SearchShardTask based on resource consumption This feature aims to identify and cancel resource intensive SearchShardTasks if they have breached certain thresholds. This will help in terminating problematic queries which can put nodes in duress and degrade the cluster performance. Signed-off-by: Ketan Verma --- CHANGELOG.md | 2 + .../org/opensearch/cluster/ClusterModule.java | 2 - .../common/settings/ClusterSettings.java | 20 +- .../opensearch/common/util/MovingAverage.java | 57 ++++ .../org/opensearch/common/util/Streak.java | 33 ++ .../opensearch/common/util/TokenBucket.java | 124 +++++++ .../main/java/org/opensearch/node/Node.java | 27 +- .../java/org/opensearch/node/NodeService.java | 10 +- .../SearchBackpressureService.java | 311 ++++++++++++++++++ .../backpressure/SearchBackpressureState.java | 57 ++++ .../search/backpressure/package-info.java | 12 + .../settings/NodeDuressSettings.java | 99 ++++++ .../settings/SearchBackpressureSettings.java | 213 ++++++++++++ .../settings/SearchShardTaskSettings.java | 160 +++++++++ .../backpressure/settings/package-info.java | 12 + .../trackers/NodeDuressTracker.java | 41 +++ .../trackers/TaskResourceUsageTracker.java | 50 +++ .../backpressure/trackers/package-info.java | 12 + .../org/opensearch/tasks/CancellableTask.java | 2 +- .../opensearch/tasks/TaskCancellation.java | 108 ++++++ .../tasks/TaskResourceTrackingService.java | 22 ++ .../org/opensearch/common/StreakTests.java | 34 ++ .../common/util/MovingAverageTests.java | 49 +++ .../common/util/TokenBucketTests.java | 77 +++++ .../SearchBackpressureServiceTests.java | 213 ++++++++++++ .../trackers/NodeDuressTrackerTests.java | 35 ++ .../tasks/TaskCancellationTests.java | 72 ++++ .../SearchBackpressureTestHelpers.java | 47 +++ 28 files changed, 1895 insertions(+), 6 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/util/MovingAverage.java create mode 100644 server/src/main/java/org/opensearch/common/util/Streak.java create mode 100644 server/src/main/java/org/opensearch/common/util/TokenBucket.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java 
create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java create mode 100644 server/src/main/java/org/opensearch/tasks/TaskCancellation.java create mode 100644 server/src/test/java/org/opensearch/common/StreakTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/MovingAverageTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/TokenBucketTests.java create mode 100644 server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java create mode 100644 server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java create mode 100644 server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java create mode 100644 test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 69fb6b42fe29f..15dedbde22d41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) - Recommissioning of zone. REST layer support. ([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) +- Added in-flight cancellation of SearchShardTask based on resource consumption ([#4575](https://github.com/opensearch-project/OpenSearch/pull/4575)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 3ac3a08d4848a..6279447485348 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -96,7 +96,6 @@ import org.opensearch.script.ScriptMetadata; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -420,7 +419,6 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); - bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 54579031aac08..320cb5457b21c 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -41,6 +41,9 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.search.backpressure.settings.NodeDuressSettings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; import
org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; @@ -581,7 +584,22 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, IndexingPressure.MAX_INDEXING_BYTES, TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED, - TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED + TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED, + + // Settings related to search backpressure + SearchBackpressureSettings.SETTING_ENABLED, + SearchBackpressureSettings.SETTING_ENFORCED, + SearchBackpressureSettings.SETTING_CANCELLATION_RATIO, + SearchBackpressureSettings.SETTING_CANCELLATION_RATE, + SearchBackpressureSettings.SETTING_CANCELLATION_BURST, + NodeDuressSettings.SETTING_NUM_SUCCESSIVE_BREACHES, + NodeDuressSettings.SETTING_CPU_THRESHOLD, + NodeDuressSettings.SETTING_HEAP_THRESHOLD, + SearchShardTaskSettings.SETTING_TOTAL_HEAP_THRESHOLD, + SearchShardTaskSettings.SETTING_HEAP_THRESHOLD, + SearchShardTaskSettings.SETTING_HEAP_VARIANCE_THRESHOLD, + SearchShardTaskSettings.SETTING_CPU_TIME_THRESHOLD, + SearchShardTaskSettings.SETTING_ELAPSED_TIME_THRESHOLD ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/MovingAverage.java b/server/src/main/java/org/opensearch/common/util/MovingAverage.java new file mode 100644 index 0000000000000..650ba62ecd8c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/MovingAverage.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +/** + * MovingAverage is used to calculate the moving average of last 'n' observations. + * + * @opensearch.internal + */ +public class MovingAverage { + private final int windowSize; + private final long[] observations; + + private long count = 0; + private long sum = 0; + private double average = 0; + + public MovingAverage(int windowSize) { + if (windowSize <= 0) { + throw new IllegalArgumentException("window size must be greater than zero"); + } + + this.windowSize = windowSize; + this.observations = new long[windowSize]; + } + + /** + * Records a new observation and evicts the n-th last observation. + */ + public synchronized double record(long value) { + long delta = value - observations[(int) (count % observations.length)]; + observations[(int) (count % observations.length)] = value; + + count++; + sum += delta; + average = (double) sum / Math.min(count, observations.length); + return average; + } + + public double getAverage() { + return average; + } + + public long getCount() { + return count; + } + + public boolean isReady() { + return count >= windowSize; + } +} diff --git a/server/src/main/java/org/opensearch/common/util/Streak.java b/server/src/main/java/org/opensearch/common/util/Streak.java new file mode 100644 index 0000000000000..5f6ad3021659e --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/Streak.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.util; + +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Streak is a data structure that keeps track of the number of successive successful events. + * + * @opensearch.internal + */ +public class Streak { + private final AtomicInteger successiveSuccessfulEvents = new AtomicInteger(); + + public int record(boolean isSuccessful) { + if (isSuccessful) { + return successiveSuccessfulEvents.incrementAndGet(); + } else { + successiveSuccessfulEvents.set(0); + return 0; + } + } + + public int length() { + return successiveSuccessfulEvents.get(); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java new file mode 100644 index 0000000000000..e47f152d71363 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; + +/** + * TokenBucket is used to limit the number of operations at a constant rate while allowing for short bursts. + * + * @opensearch.internal + */ +public class TokenBucket { + /** + * Defines a monotonically increasing counter. + * + * Usage examples: + * 1. clock = System::nanoTime can be used to perform rate-limiting per unit time + * 2. clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations + */ + private final LongSupplier clock; + + /** + * Defines the number of tokens added to the bucket per clock cycle. + */ + private final double rate; + + /** + * Defines the capacity and the maximum number of operations that can be performed per clock cycle before + * the bucket runs out of tokens. + */ + private final double burst; + + /** + * Defines the current state of the token bucket. + */ + private final AtomicReference state; + + public TokenBucket(LongSupplier clock, double rate, double burst) { + this(clock, rate, burst, burst); + } + + public TokenBucket(LongSupplier clock, double rate, double burst, double initialTokens) { + if (rate <= 0.0) { + throw new IllegalArgumentException("rate must be greater than zero"); + } + + if (burst <= 0.0) { + throw new IllegalArgumentException("burst must be greater than zero"); + } + + this.clock = clock; + this.rate = rate; + this.burst = burst; + this.state = new AtomicReference<>(new State(Math.min(initialTokens, burst), clock.getAsLong())); + } + + /** + * If there are enough tokens in the bucket, it requests/deducts 'n' tokens and returns true. + * Otherwise, returns false and leaves the bucket untouched. 
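(Editorial aside: the two clock modes described in the javadoc above are worth making concrete. The following sketch is not part of the patch; the class name, rates, and bursts are invented. It shows the same bucket class driven once by wall-clock time and once by an operation counter.)

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.opensearch.common.util.TokenBucket;

// Illustrative only: the same TokenBucket serves both clock modes, only the LongSupplier changes.
public class TokenBucketModes {
    public static void main(String[] args) {
        // Time-based: ~5 tokens refilled per second of wall-clock time, bursts of up to 10 operations.
        TokenBucket perSecond = new TokenBucket(System::nanoTime, 5.0 / TimeUnit.SECONDS.toNanos(1), 10.0);

        // Ops-based: the "clock" is a completion counter, so 0.1 tokens are refilled per
        // completed operation (at most ~1 grant per 10 completions once the initial burst is spent).
        AtomicLong completions = new AtomicLong();
        TokenBucket perCompletion = new TokenBucket(completions::get, 0.1, 5.0);

        System.out.println(perSecond.request());        // true: the bucket starts full
        completions.addAndGet(10);                      // advance the ops "clock" by ten completions
        System.out.println(perCompletion.request(2.0)); // fractional and multi-token requests are allowed
    }
}
```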
+ */ + public boolean request(double n) { + if (n <= 0) { + throw new IllegalArgumentException("requested tokens must be greater than zero"); + } + + // Refill tokens + State currentState, updatedState; + do { + currentState = state.get(); + long now = clock.getAsLong(); + double incr = (now - currentState.lastRefilledAt) * rate; + updatedState = new State(Math.min(currentState.tokens + incr, burst), now); + } while (state.compareAndSet(currentState, updatedState) == false); + + // Deduct tokens + do { + currentState = state.get(); + if (currentState.tokens < n) { + return false; + } + updatedState = new State(currentState.tokens - n, currentState.lastRefilledAt); + } while (state.compareAndSet(currentState, updatedState) == false); + + return true; + } + + public boolean request() { + return request(1.0); + } + + /** + * Represents an immutable token bucket state. + */ + private static class State { + final double tokens; + final double lastRefilledAt; + + public State(double tokens, double lastRefilledAt) { + this.tokens = tokens; + this.lastRefilledAt = lastRefilledAt; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + State state = (State) o; + return Double.compare(state.tokens, tokens) == 0 && Double.compare(state.lastRefilledAt, lastRefilledAt) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(tokens, lastRefilledAt); + } + } +} diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 504550378e14f..54696b1f6b118 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -43,6 +43,8 @@ import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; +import org.opensearch.search.backpressure.SearchBackpressureService; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; @@ -796,6 +798,23 @@ protected Node( // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478). 
clusterService.setIndexingPressureService(indexingPressureService); + final TaskResourceTrackingService taskResourceTrackingService = new TaskResourceTrackingService( + settings, + clusterService.getClusterSettings(), + threadPool + ); + + final SearchBackpressureSettings searchBackpressureSettings = new SearchBackpressureSettings( + settings, + clusterService.getClusterSettings() + ); + + final SearchBackpressureService searchBackpressureService = new SearchBackpressureService( + searchBackpressureSettings, + taskResourceTrackingService, + threadPool + ); + final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, @@ -883,7 +902,8 @@ protected Node( responseCollectorService, searchTransportService, indexingPressureService, - searchModule.getValuesSourceRegistry().getUsageService() + searchModule.getValuesSourceRegistry().getUsageService(), + searchBackpressureService ); final SearchService searchService = newSearchService( @@ -941,6 +961,8 @@ protected Node( b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry()); b.bind(IngestService.class).toInstance(ingestService); b.bind(IndexingPressureService.class).toInstance(indexingPressureService); + b.bind(TaskResourceTrackingService.class).toInstance(taskResourceTrackingService); + b.bind(SearchBackpressureService.class).toInstance(searchBackpressureService); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); @@ -1104,6 +1126,7 @@ public Node start() throws NodeValidationException { injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); nodeService.getMonitorService().start(); + nodeService.getSearchBackpressureService().start(); final ClusterService clusterService = injector.getInstance(ClusterService.class); @@ -1259,6 +1282,7 @@ private Node stop() { injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); nodeService.getMonitorService().stop(); + nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); injector.getInstance(SearchService.class).stop(); injector.getInstance(TransportService.class).stop(); @@ -1318,6 +1342,7 @@ public synchronized void close() throws IOException { toClose.add(injector.getInstance(Discovery.class)); toClose.add(() -> stopWatch.stop().start("monitor")); toClose.add(nodeService.getMonitorService()); + toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index ab98b47c7287b..f4e037db66ca2 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -53,6 +53,7 @@ import org.opensearch.plugins.PluginsService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; +import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.threadpool.ThreadPool; import 
org.opensearch.transport.TransportService; @@ -81,6 +82,7 @@ public class NodeService implements Closeable { private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; + private final SearchBackpressureService searchBackpressureService; private final Discovery discovery; @@ -101,7 +103,8 @@ public class NodeService implements Closeable { ResponseCollectorService responseCollectorService, SearchTransportService searchTransportService, IndexingPressureService indexingPressureService, - AggregationUsageService aggregationUsageService + AggregationUsageService aggregationUsageService, + SearchBackpressureService searchBackpressureService ) { this.settings = settings; this.threadPool = threadPool; @@ -119,6 +122,7 @@ public class NodeService implements Closeable { this.searchTransportService = searchTransportService; this.indexingPressureService = indexingPressureService; this.aggregationUsageService = aggregationUsageService; + this.searchBackpressureService = searchBackpressureService; clusterService.addStateApplier(ingestService); } @@ -203,6 +207,10 @@ public MonitorService getMonitorService() { return monitorService; } + public SearchBackpressureService getSearchBackpressureService() { + return searchBackpressureService; + } + @Override public void close() throws IOException { IOUtils.close(indicesService); diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java new file mode 100644 index 0000000000000..885846a177d60 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -0,0 +1,311 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.util.TokenBucket; +import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.trackers.NodeDuressTracker; +import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.tasks.TaskResourceTrackingService.TaskCompletionListener; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +/** + * SearchBackpressureService is responsible for monitoring and cancelling in-flight search tasks if they are + * breaching resource usage limits when the node is in duress. 
+ * + * @opensearch.internal + */ +public class SearchBackpressureService extends AbstractLifecycleComponent + implements + TaskCompletionListener, + SearchBackpressureSettings.Listener { + private static final Logger logger = LogManager.getLogger(SearchBackpressureService.class); + + private volatile Scheduler.Cancellable scheduledFuture; + + private final SearchBackpressureSettings settings; + private final TaskResourceTrackingService taskResourceTrackingService; + private final ThreadPool threadPool; + private final LongSupplier timeNanosSupplier; + + private final List nodeDuressTrackers; + private final List taskResourceUsageTrackers; + + private final AtomicReference taskCancellationRateLimiter = new AtomicReference<>(); + private final AtomicReference taskCancellationRatioLimiter = new AtomicReference<>(); + + // Currently, only the state of SearchShardTask is being tracked. + // This can be generalized to Map once we start supporting cancellation of SearchTasks as well. + private final SearchBackpressureState state = new SearchBackpressureState(); + + public SearchBackpressureService( + SearchBackpressureSettings settings, + TaskResourceTrackingService taskResourceTrackingService, + ThreadPool threadPool + ) { + this( + settings, + taskResourceTrackingService, + threadPool, + System::nanoTime, + List.of( + new NodeDuressTracker( + () -> ProcessProbe.getInstance().getProcessCpuPercent() / 100.0 >= settings.getNodeDuressSettings().getCpuThreshold() + ), + new NodeDuressTracker( + () -> JvmStats.jvmStats().getMem().getHeapUsedPercent() / 100.0 >= settings.getNodeDuressSettings().getHeapThreshold() + ) + ), + Collections.emptyList() + ); + } + + public SearchBackpressureService( + SearchBackpressureSettings settings, + TaskResourceTrackingService taskResourceTrackingService, + ThreadPool threadPool, + LongSupplier timeNanosSupplier, + List nodeDuressTrackers, + List taskResourceUsageTrackers + ) { + this.settings = settings; + this.settings.setListener(this); + this.taskResourceTrackingService = taskResourceTrackingService; + this.taskResourceTrackingService.addTaskCompletionListener(this); + this.threadPool = threadPool; + this.timeNanosSupplier = timeNanosSupplier; + this.nodeDuressTrackers = nodeDuressTrackers; + this.taskResourceUsageTrackers = taskResourceUsageTrackers; + + this.taskCancellationRateLimiter.set( + new TokenBucket(timeNanosSupplier, getSettings().getCancellationRateNanos(), getSettings().getCancellationBurst()) + ); + + this.taskCancellationRatioLimiter.set( + new TokenBucket(state::getCompletionCount, getSettings().getCancellationRatio(), getSettings().getCancellationBurst()) + ); + } + + void doRun() { + if (getSettings().isEnabled() == false) { + return; + } + + if (isNodeInDuress() == false) { + return; + } + + // We are only targeting in-flight cancellation of SearchShardTask for now. + List searchShardTasks = getSearchShardTasks(); + + // Force-refresh usage stats of these tasks before making a cancellation decision. + taskResourceTrackingService.refreshResourceStats(searchShardTasks.toArray(new Task[0])); + + // Skip cancellation if the increase in heap usage is not due to search requests. 
+ if (isHeapUsageDominatedBySearch(searchShardTasks) == false) { + return; + } + + for (TaskCancellation taskCancellation : getTaskCancellations(searchShardTasks)) { + logger.debug( + "cancelling task [{}] due to high resource consumption [{}]", + taskCancellation.getTask().getId(), + taskCancellation.getReasonString() + ); + + if (getSettings().isEnforced() == false) { + continue; + } + + // Independently remove tokens from both token buckets. + boolean rateLimitReached = taskCancellationRateLimiter.get().request() == false; + boolean ratioLimitReached = taskCancellationRatioLimiter.get().request() == false; + + // Stop cancelling tasks if there are no tokens in either of the two token buckets. + if (rateLimitReached && ratioLimitReached) { + logger.debug("task cancellation limit reached"); + state.incrementLimitReachedCount(); + break; + } + + taskCancellation.cancel(); + state.incrementCancellationCount(); + } + } + + /** + * Returns true if the node is in duress consecutively for the past 'n' observations. + */ + boolean isNodeInDuress() { + boolean isNodeInDuress = false; + int numSuccessiveBreaches = getSettings().getNodeDuressSettings().getNumSuccessiveBreaches(); + + for (NodeDuressTracker tracker : nodeDuressTrackers) { + if (tracker.check() >= numSuccessiveBreaches) { + isNodeInDuress = true; // not breaking the loop so that each tracker's streak gets updated. + } + } + + return isNodeInDuress; + } + + /** + * Returns true if the increase in heap usage is due to search requests. + */ + boolean isHeapUsageDominatedBySearch(List searchShardTasks) { + long runningTasksHeapUsage = searchShardTasks.stream().mapToLong(task -> task.getTotalResourceStats().getMemoryInBytes()).sum(); + long searchTasksHeapThreshold = getSettings().getSearchShardTaskSettings().getTotalHeapThresholdBytes(); + if (runningTasksHeapUsage < searchTasksHeapThreshold) { + logger.debug("heap usage not dominated by search requests [{}/{}]", runningTasksHeapUsage, searchTasksHeapThreshold); + return false; + } + + return true; + } + + /** + * Filters and returns the list of currently running SearchShardTasks. + */ + List getSearchShardTasks() { + return taskResourceTrackingService.getResourceAwareTasks() + .values() + .stream() + .filter(task -> task instanceof SearchShardTask) + .map(task -> (CancellableTask) task) + .collect(Collectors.toUnmodifiableList()); + } + + /** + * Returns a TaskCancellation wrapper containing the list of reasons (possibly zero), along with an overall + * cancellation score for the given task. Cancelling a task with a higher score has better chance of recovering the + * node from duress. + */ + TaskCancellation getTaskCancellation(CancellableTask task) { + List reasons = new ArrayList<>(); + List callbacks = new ArrayList<>(); + + for (TaskResourceUsageTracker tracker : taskResourceUsageTrackers) { + Optional reason = tracker.cancellationReason(task); + if (reason.isPresent()) { + reasons.add(reason.get()); + callbacks.add(tracker::incrementCancellations); + } + } + + return new TaskCancellation(task, reasons, callbacks); + } + + /** + * Returns a list of TaskCancellations sorted by descending order of their cancellation scores. 
+ */ + List<TaskCancellation> getTaskCancellations(List<CancellableTask> tasks) { + return tasks.stream() + .map(this::getTaskCancellation) + .filter(TaskCancellation::isEligibleForCancellation) + .sorted(Comparator.reverseOrder()) + .collect(Collectors.toUnmodifiableList()); + } + + SearchBackpressureSettings getSettings() { + return settings; + } + + SearchBackpressureState getState() { + return state; + } + + @Override + public void onTaskCompleted(Task task) { + if (getSettings().isEnabled() == false) { + return; + } + + if (task instanceof SearchShardTask == false) { + return; + } + + SearchShardTask searchShardTask = (SearchShardTask) task; + if (searchShardTask.isCancelled() == false) { + state.incrementCompletionCount(); + } + + List<Exception> exceptions = new ArrayList<>(); + for (TaskResourceUsageTracker tracker : taskResourceUsageTrackers) { + try { + tracker.update(searchShardTask); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); + } + + @Override + public void onCancellationRatioChanged() { + taskCancellationRatioLimiter.set( + new TokenBucket(state::getCompletionCount, getSettings().getCancellationRatio(), getSettings().getCancellationBurst()) + ); + } + + @Override + public void onCancellationRateChanged() { + taskCancellationRateLimiter.set( + new TokenBucket(timeNanosSupplier, getSettings().getCancellationRateNanos(), getSettings().getCancellationBurst()) + ); + } + + @Override + public void onCancellationBurstChanged() { + onCancellationRatioChanged(); + onCancellationRateChanged(); + } + + @Override + protected void doStart() { + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + try { + doRun(); + } catch (Exception e) { + logger.debug("failure in search backpressure", e); + } + }, getSettings().getInterval(), ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() throws IOException {} +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java new file mode 100644 index 0000000000000..a62231ec29ede --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Tracks the current state of task completions and cancellations. + * + * @opensearch.internal + */ +public class SearchBackpressureState { + /** + * The number of successful task completions. + */ + private final AtomicLong completionCount = new AtomicLong(); + + /** + * The number of task cancellations due to limit breaches. + */ + private final AtomicLong cancellationCount = new AtomicLong(); + + /** + * The number of times the task cancellation limit was reached.
+ */ + private final AtomicLong limitReachedCount = new AtomicLong(); + + public long getCompletionCount() { + return completionCount.get(); + } + + long incrementCompletionCount() { + return completionCount.incrementAndGet(); + } + + public long getCancellationCount() { + return cancellationCount.get(); + } + + long incrementCancellationCount() { + return cancellationCount.incrementAndGet(); + } + + public long getLimitReachedCount() { + return limitReachedCount.get(); + } + + long incrementLimitReachedCount() { + return limitReachedCount.incrementAndGet(); + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/package-info.java b/server/src/main/java/org/opensearch/search/backpressure/package-info.java new file mode 100644 index 0000000000000..36d216993b2fc --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes responsible for search backpressure. + */ +package org.opensearch.search.backpressure; diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java new file mode 100644 index 0000000000000..09c1e4fcef46c --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Defines the settings for a node to be considered in duress. + * + * @opensearch.internal + */ +public class NodeDuressSettings { + private static class Defaults { + private static final int NUM_SUCCESSIVE_BREACHES = 3; + private static final double CPU_THRESHOLD = 0.9; + private static final double HEAP_THRESHOLD = 0.7; + } + + /** + * Defines the number of successive limit breaches after which the node is marked "in duress". + */ + private volatile int numSuccessiveBreaches; + public static final Setting SETTING_NUM_SUCCESSIVE_BREACHES = Setting.intSetting( + "search_backpressure.node_duress.num_successive_breaches", + Defaults.NUM_SUCCESSIVE_BREACHES, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the CPU usage threshold (in percentage) for a node to be considered "in duress". + */ + private volatile double cpuThreshold; + public static final Setting SETTING_CPU_THRESHOLD = Setting.doubleSetting( + "search_backpressure.node_duress.cpu_threshold", + Defaults.CPU_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage threshold (in percentage) for a node to be considered "in duress".
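(Editorial aside: the three node-duress knobs are ordinary node settings and can be supplied at startup. A hypothetical snippet using the standard Settings builder; the values are invented, and the heap key is the one defined just below.)

```java
import org.opensearch.common.settings.Settings;

// Hypothetical values: treat 5 successive breaches of 85% CPU or 75% heap as node
// duress. The keys are the ones registered by NodeDuressSettings.
Settings nodeSettings = Settings.builder()
    .put("search_backpressure.node_duress.num_successive_breaches", 5)
    .put("search_backpressure.node_duress.cpu_threshold", 0.85)
    .put("search_backpressure.node_duress.heap_threshold", 0.75)
    .build();
```

Because each setting is registered with Setting.Property.Dynamic, the same keys can also be updated at runtime through the cluster settings API.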
+ */ + private volatile double heapThreshold; + public static final Setting SETTING_HEAP_THRESHOLD = Setting.doubleSetting( + "search_backpressure.node_duress.heap_threshold", + Defaults.HEAP_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public NodeDuressSettings(Settings settings, ClusterSettings clusterSettings) { + numSuccessiveBreaches = SETTING_NUM_SUCCESSIVE_BREACHES.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_NUM_SUCCESSIVE_BREACHES, this::setNumSuccessiveBreaches); + + cpuThreshold = SETTING_CPU_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CPU_THRESHOLD, this::setCpuThreshold); + + heapThreshold = SETTING_HEAP_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_THRESHOLD, this::setHeapThreshold); + } + + public int getNumSuccessiveBreaches() { + return numSuccessiveBreaches; + } + + private void setNumSuccessiveBreaches(int numSuccessiveBreaches) { + this.numSuccessiveBreaches = numSuccessiveBreaches; + } + + public double getCpuThreshold() { + return cpuThreshold; + } + + private void setCpuThreshold(double cpuThreshold) { + this.cpuThreshold = cpuThreshold; + } + + public double getHeapThreshold() { + return heapThreshold; + } + + private void setHeapThreshold(double heapThreshold) { + this.heapThreshold = heapThreshold; + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java new file mode 100644 index 0000000000000..4834808d768f1 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.settings; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.concurrent.TimeUnit; + +/** + * Settings related to search backpressure and cancellation of in-flight requests. + * + * @opensearch.internal + */ +public class SearchBackpressureSettings { + private static class Defaults { + private static final long INTERVAL = 1000; + + private static final boolean ENABLED = true; + private static final boolean ENFORCED = false; + + private static final double CANCELLATION_RATIO = 0.1; + private static final double CANCELLATION_RATE = 0.003; + private static final double CANCELLATION_BURST = 10.0; + } + + /** + * Defines the interval (in millis) at which the SearchBackpressureService monitors and cancels tasks. + */ + private final TimeValue interval; + public static final Setting SETTING_INTERVAL = Setting.longSetting( + "search_backpressure.interval", + Defaults.INTERVAL, + 1, + Setting.Property.NodeScope + ); + + /** + * Defines whether search backpressure is enabled or not. + */ + private volatile boolean enabled; + public static final Setting SETTING_ENABLED = Setting.boolSetting( + "search_backpressure.enabled", + Defaults.ENABLED, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines whether in-flight cancellation of tasks is enabled or not. 
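(Editorial aside: since `enabled` and `enforced` are independent dynamic flags, the service can run in an observe-only mode, enabled but not enforced, where doRun() logs would-be cancellations without cancelling anything. A hedged sketch of flipping the flags at runtime with the standard cluster-settings request; `client` is assumed to be an initialized org.opensearch.client.Client.)

```java
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.common.settings.Settings;

// Observe-only first: identify and log resource-heavy tasks, but do not cancel.
client.admin()
    .cluster()
    .updateSettings(
        new ClusterUpdateSettingsRequest().persistentSettings(
            Settings.builder().put("search_backpressure.enabled", true).put("search_backpressure.enforced", false).build()
        )
    )
    .actionGet();

// Once the logged candidates look sane, turn enforcement on.
client.admin()
    .cluster()
    .updateSettings(
        new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder().put("search_backpressure.enforced", true).build())
    )
    .actionGet();
```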
+ */ + private volatile boolean enforced; + public static final Setting SETTING_ENFORCED = Setting.boolSetting( + "search_backpressure.enforced", + Defaults.ENFORCED, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the percentage of tasks to cancel relative to the number of successful task completions. + * In other words, it is the number of tokens added to the bucket on each successful task completion. + */ + private volatile double cancellationRatio; + public static final Setting SETTING_CANCELLATION_RATIO = Setting.doubleSetting( + "search_backpressure.cancellation_ratio", + Defaults.CANCELLATION_RATIO, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the number of tasks to cancel per unit time (in millis). + * In other words, it is the number of tokens added to the bucket each millisecond. + */ + private volatile double cancellationRate; + public static final Setting SETTING_CANCELLATION_RATE = Setting.doubleSetting( + "search_backpressure.cancellation_rate", + Defaults.CANCELLATION_RATE, + 0.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the maximum number of tasks that can be cancelled before being rate-limited. + */ + private volatile double cancellationBurst; + public static final Setting SETTING_CANCELLATION_BURST = Setting.doubleSetting( + "search_backpressure.cancellation_burst", + Defaults.CANCELLATION_BURST, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Callback listeners. + */ + public interface Listener { + void onCancellationRatioChanged(); + + void onCancellationRateChanged(); + + void onCancellationBurstChanged(); + } + + private final SetOnce listener = new SetOnce<>(); + private final NodeDuressSettings nodeDuressSettings; + private final SearchShardTaskSettings searchShardTaskSettings; + + public SearchBackpressureSettings(Settings settings, ClusterSettings clusterSettings) { + this.nodeDuressSettings = new NodeDuressSettings(settings, clusterSettings); + this.searchShardTaskSettings = new SearchShardTaskSettings(settings, clusterSettings); + + interval = new TimeValue(SETTING_INTERVAL.get(settings)); + + enabled = SETTING_ENABLED.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_ENABLED, this::setEnabled); + + enforced = SETTING_ENFORCED.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_ENFORCED, this::setEnforced); + + cancellationRatio = SETTING_CANCELLATION_RATIO.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_RATIO, this::setCancellationRatio); + + cancellationRate = SETTING_CANCELLATION_RATE.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_RATE, this::setCancellationRate); + + cancellationBurst = SETTING_CANCELLATION_BURST.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_BURST, this::setCancellationBurst); + } + + public void setListener(Listener listener) { + this.listener.set(listener); + } + + public NodeDuressSettings getNodeDuressSettings() { + return nodeDuressSettings; + } + + public SearchShardTaskSettings getSearchShardTaskSettings() { + return searchShardTaskSettings; + } + + public TimeValue getInterval() { + return interval; + } + + public boolean isEnabled() { + return enabled; + } + + private void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public boolean isEnforced() { + return enforced; + } + + private void setEnforced(boolean enforced) { + 
this.enforced = enforced; + } + + public double getCancellationRatio() { + return cancellationRatio; + } + + private void setCancellationRatio(double cancellationRatio) { + this.cancellationRatio = cancellationRatio; + if (listener.get() != null) { + listener.get().onCancellationRatioChanged(); + } + } + + public double getCancellationRate() { + return cancellationRate; + } + + public double getCancellationRateNanos() { + return getCancellationRate() / TimeUnit.MILLISECONDS.toNanos(1); // rate per nanoseconds + } + + private void setCancellationRate(double cancellationRate) { + this.cancellationRate = cancellationRate; + if (listener.get() != null) { + listener.get().onCancellationRateChanged(); + } + } + + public double getCancellationBurst() { + return cancellationBurst; + } + + private void setCancellationBurst(double cancellationBurst) { + this.cancellationBurst = cancellationBurst; + if (listener.get() != null) { + listener.get().onCancellationBurstChanged(); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java new file mode 100644 index 0000000000000..1126dad78f554 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.monitor.jvm.JvmStats; + +/** + * Defines the settings related to the cancellation of SearchShardTasks. + * + * @opensearch.internal + */ +public class SearchShardTaskSettings { + private static final long HEAP_SIZE_BYTES = JvmStats.jvmStats().getMem().getHeapMax().getBytes(); + + private static class Defaults { + private static final double TOTAL_HEAP_THRESHOLD = 0.05; + private static final double HEAP_THRESHOLD = 0.005; + private static final double HEAP_VARIANCE_THRESHOLD = 2.0; + private static final long CPU_TIME_THRESHOLD = 15; + private static final long ELAPSED_TIME_THRESHOLD = 30000; + } + + /** + * Defines the heap usage threshold (in percentage) for the sum of heap usages across all search shard tasks + * before in-flight cancellation is applied. + */ + private volatile double totalHeapThreshold; + public static final Setting SETTING_TOTAL_HEAP_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.total_heap_threshold", + Defaults.TOTAL_HEAP_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage threshold (in percentage) for an individual task before it is considered for cancellation. + */ + private volatile double heapThreshold; + public static final Setting SETTING_HEAP_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.heap_threshold", + Defaults.HEAP_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage variance for an individual task before it is considered for cancellation. + * A task is considered for cancellation when taskHeapUsage is greater than or equal to heapUsageMovingAverage * variance. 
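(Editorial aside: the tracker that enforces this variance rule is not part of this patch, only the abstract TaskResourceUsageTracker base class below is. The following is a hypothetical sketch of the described check, built from the MovingAverage utility and the threshold getters defined in this file; the class name, window size, and wiring are invented.)

```java
import org.opensearch.common.util.MovingAverage;
import org.opensearch.search.backpressure.settings.SearchShardTaskSettings;

// Hypothetical sketch of the heap-variance rule described above.
class HeapUsageCheck {
    private final MovingAverage heapUsageMovingAverage = new MovingAverage(100); // window size is an assumption
    private final SearchShardTaskSettings settings;

    HeapUsageCheck(SearchShardTaskSettings settings) {
        this.settings = settings;
    }

    // Called when a task completes: fold its heap usage into the rolling average.
    void onTaskCompleted(long taskHeapUsageBytes) {
        heapUsageMovingAverage.record(taskHeapUsageBytes);
    }

    // A running task is considered for cancellation once the average is warmed up and its
    // usage is both above the per-task floor and 'variance' times the recent average.
    boolean breachesVarianceRule(long taskHeapUsageBytes) {
        return heapUsageMovingAverage.isReady()
            && taskHeapUsageBytes >= settings.getHeapThresholdBytes()
            && taskHeapUsageBytes >= heapUsageMovingAverage.getAverage() * settings.getHeapVarianceThreshold();
    }
}
```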
+ */ + private volatile double heapVarianceThreshold; + public static final Setting SETTING_HEAP_VARIANCE_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.heap_variance", + Defaults.HEAP_VARIANCE_THRESHOLD, + 0.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the CPU usage threshold (in millis) for an individual task before it is considered for cancellation. + */ + private volatile long cpuTimeThreshold; + public static final Setting SETTING_CPU_TIME_THRESHOLD = Setting.longSetting( + "search_backpressure.search_shard_task.cpu_time_threshold", + Defaults.CPU_TIME_THRESHOLD, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the elapsed time threshold (in millis) for an individual task before it is considered for cancellation. + */ + private volatile long elapsedTimeThreshold; + public static final Setting SETTING_ELAPSED_TIME_THRESHOLD = Setting.longSetting( + "search_backpressure.search_shard_task.elapsed_time_threshold", + Defaults.ELAPSED_TIME_THRESHOLD, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public SearchShardTaskSettings(Settings settings, ClusterSettings clusterSettings) { + totalHeapThreshold = SETTING_TOTAL_HEAP_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_TOTAL_HEAP_THRESHOLD, this::setTotalHeapThreshold); + + heapThreshold = SETTING_HEAP_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_THRESHOLD, this::setHeapThreshold); + + heapVarianceThreshold = SETTING_HEAP_VARIANCE_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_VARIANCE_THRESHOLD, this::setHeapVarianceThreshold); + + cpuTimeThreshold = SETTING_CPU_TIME_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CPU_TIME_THRESHOLD, this::setCpuTimeThreshold); + + elapsedTimeThreshold = SETTING_ELAPSED_TIME_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_ELAPSED_TIME_THRESHOLD, this::setElapsedTimeThreshold); + } + + public double getTotalHeapThreshold() { + return totalHeapThreshold; + } + + public long getTotalHeapThresholdBytes() { + return (long) (HEAP_SIZE_BYTES * getTotalHeapThreshold()); + } + + private void setTotalHeapThreshold(double totalHeapThreshold) { + this.totalHeapThreshold = totalHeapThreshold; + } + + public double getHeapThreshold() { + return heapThreshold; + } + + public long getHeapThresholdBytes() { + return (long) (HEAP_SIZE_BYTES * getHeapThreshold()); + } + + private void setHeapThreshold(double heapThreshold) { + this.heapThreshold = heapThreshold; + } + + public double getHeapVarianceThreshold() { + return heapVarianceThreshold; + } + + private void setHeapVarianceThreshold(double heapVarianceThreshold) { + this.heapVarianceThreshold = heapVarianceThreshold; + } + + public long getCpuTimeThreshold() { + return cpuTimeThreshold; + } + + private void setCpuTimeThreshold(long cpuTimeThreshold) { + this.cpuTimeThreshold = cpuTimeThreshold; + } + + public long getElapsedTimeThreshold() { + return elapsedTimeThreshold; + } + + private void setElapsedTimeThreshold(long elapsedTimeThreshold) { + this.elapsedTimeThreshold = elapsedTimeThreshold; + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java b/server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java new file mode 100644 index 0000000000000..a853a139b096b --- /dev/null +++ 
b/server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains settings for search backpressure. + */ +package org.opensearch.search.backpressure.settings; diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java new file mode 100644 index 0000000000000..8e35c724a8fef --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.common.util.Streak; + +import java.util.function.BooleanSupplier; + +/** + * NodeDuressTracker is used to check if the node is in duress. + * + * @opensearch.internal + */ +public class NodeDuressTracker { + /** + * Tracks the number of consecutive breaches. + */ + private final Streak breaches = new Streak(); + + /** + * Predicate that returns true when the node is in duress. + */ + private final BooleanSupplier isNodeInDuress; + + public NodeDuressTracker(BooleanSupplier isNodeInDuress) { + this.isNodeInDuress = isNodeInDuress; + } + + /** + * Evaluates the predicate and returns the number of consecutive breaches. + */ + public int check() { + return breaches.record(isNodeInDuress.getAsBoolean()); + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java new file mode 100644 index 0000000000000..8f1842efa5771 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.tasks.Task; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; + +/** + * TaskResourceUsageTracker is used to track completions and cancellations of search related tasks. + * + * @opensearch.internal + */ +public abstract class TaskResourceUsageTracker { + /** + * Counts the number of cancellations made due to this tracker. + */ + private final AtomicLong cancellations = new AtomicLong(); + + public long incrementCancellations() { + return cancellations.incrementAndGet(); + } + + public long getCancellations() { + return cancellations.get(); + } + + /** + * Returns a unique name for this tracker. + */ + public abstract String name(); + + /** + * Notifies the tracker to update its state when a task execution completes. + */ + public abstract void update(Task task); + + /** + * Returns the cancellation reason for the given task, if it's eligible for cancellation. 
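(Editorial aside: concrete trackers arrive in follow-up changes; to make the abstract contract above concrete, here is a hypothetical minimal subclass. The getCpuTimeInNanos() accessor on the task's resource stats is an assumption, mirroring the getMemoryInBytes() accessor used by SearchBackpressureService above, and the threshold and score are invented.)

```java
import java.util.Optional;
import java.util.concurrent.TimeUnit;

import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker;
import org.opensearch.tasks.Task;
import org.opensearch.tasks.TaskCancellation;

// Hypothetical tracker: flags tasks whose CPU time exceeds a fixed budget.
class SimpleCpuTimeTracker extends TaskResourceUsageTracker {
    private final long cpuTimeNanosThreshold = TimeUnit.MILLISECONDS.toNanos(15); // invented budget

    @Override
    public String name() {
        return "simple_cpu_time_tracker";
    }

    @Override
    public void update(Task task) {
        // This simple tracker keeps no rolling state; stateful trackers would
        // record the completed task's usage here.
    }

    @Override
    public Optional<TaskCancellation.Reason> cancellationReason(Task task) {
        long cpuTimeNanos = task.getTotalResourceStats().getCpuTimeInNanos(); // accessor assumed
        if (cpuTimeNanos < cpuTimeNanosThreshold) {
            return Optional.empty();
        }
        return Optional.of(new TaskCancellation.Reason("cpu usage exceeded [" + cpuTimeNanos + "ns]", 1));
    }
}
```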
+ */ + public abstract Optional cancellationReason(Task task); +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java new file mode 100644 index 0000000000000..da0532421391e --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains trackers to check if resource usage limits are breached on a node or task level. + */ +package org.opensearch.search.backpressure.trackers; diff --git a/server/src/main/java/org/opensearch/tasks/CancellableTask.java b/server/src/main/java/org/opensearch/tasks/CancellableTask.java index 439be2b630e84..336f5b1f4c244 100644 --- a/server/src/main/java/org/opensearch/tasks/CancellableTask.java +++ b/server/src/main/java/org/opensearch/tasks/CancellableTask.java @@ -71,7 +71,7 @@ public CancellableTask( /** * This method is called by the task manager when this task is cancelled. */ - final void cancel(String reason) { + public void cancel(String reason) { assert reason != null; if (cancelled.compareAndSet(false, true)) { this.reason = reason; diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java new file mode 100644 index 0000000000000..2c302172cf6bc --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.ExceptionsHelper; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +/** + * TaskCancellation is a wrapper for a task and its cancellation reasons. + * + * @opensearch.internal + */ +public class TaskCancellation implements Comparable { + private final CancellableTask task; + private final List reasons; + private final List onCancelCallbacks; + + public TaskCancellation(CancellableTask task, List reasons, List onCancelCallbacks) { + this.task = task; + this.reasons = reasons; + this.onCancelCallbacks = onCancelCallbacks; + } + + public CancellableTask getTask() { + return task; + } + + public List getReasons() { + return reasons; + } + + public String getReasonString() { + return reasons.stream().map(Reason::getMessage).collect(Collectors.joining(", ")); + } + + /** + * Cancels the task and invokes all onCancelCallbacks. + */ + public void cancel() { + if (isEligibleForCancellation() == false) { + return; + } + + task.cancel(getReasonString()); + + List exceptions = new ArrayList<>(); + for (Runnable callback : onCancelCallbacks) { + try { + callback.run(); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); + } + + /** + * Returns the sum of all cancellation scores. + * + * A zero score indicates no reason to cancel the task. + * A task with a higher score suggests greater possibility of recovering the node when it is cancelled. 
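(Editorial aside, for intuition: a task flagged by two trackers accumulates a higher total score and is therefore cancelled first under the Comparator.reverseOrder() sort in SearchBackpressureService. A hypothetical fragment; reasons, scores, and the two task arguments are invented.)

```java
import java.util.List;

import org.opensearch.tasks.CancellableTask;
import org.opensearch.tasks.TaskCancellation;

// Illustrative only: 'task1' and 'task2' stand in for real CancellableTask instances.
void compareScores(CancellableTask task1, CancellableTask task2) {
    TaskCancellation heavy = new TaskCancellation(
        task1,
        List.of(new TaskCancellation.Reason("heap usage exceeded", 2), new TaskCancellation.Reason("cpu usage exceeded", 1)),
        List.of()
    );
    TaskCancellation light = new TaskCancellation(task2, List.of(new TaskCancellation.Reason("elapsed time exceeded", 1)), List.of());

    assert heavy.totalCancellationScore() == 3;
    assert light.totalCancellationScore() == 1;
    assert heavy.compareTo(light) > 0; // Comparator.reverseOrder() thus cancels 'heavy' first
}
```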
+ */ + public int totalCancellationScore() { + return reasons.stream().mapToInt(Reason::getCancellationScore).sum(); + } + + /** + * A task is eligible for cancellation if it has one or more cancellation reasons, and is not already cancelled. + */ + public boolean isEligibleForCancellation() { + return (task.isCancelled() == false) && (reasons.size() > 0); + } + + @Override + public int compareTo(TaskCancellation other) { + return Integer.compare(totalCancellationScore(), other.totalCancellationScore()); + } + + /** + * Represents the cancellation reason for a task. + */ + public static class Reason { + private final String message; + private final int cancellationScore; + + public Reason(String message, int cancellationScore) { + this.message = message; + this.cancellationScore = cancellationScore; + } + + public String getMessage() { + return message; + } + + public int getCancellationScore() { + return cancellationScore; + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java index c3cad117390e4..b4806b531429e 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; @@ -50,6 +51,7 @@ public class TaskResourceTrackingService implements RunnableTaskExecutionListene private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final List taskCompletionListeners = new ArrayList<>(); private final ThreadPool threadPool; private volatile boolean taskResourceTrackingEnabled; @@ -116,6 +118,16 @@ public void stopTracking(Task task) { } finally { resourceAwareTasks.remove(task.getId()); } + + List exceptions = new ArrayList<>(); + for (TaskCompletionListener listener : taskCompletionListeners) { + try { + listener.onTaskCompleted(task); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); } /** @@ -245,4 +257,14 @@ private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { return storedContext; } + /** + * Listener that gets invoked when a task execution completes. + */ + public interface TaskCompletionListener { + void onTaskCompleted(Task task); + } + + public void addTaskCompletionListener(TaskCompletionListener listener) { + this.taskCompletionListeners.add(listener); + } } diff --git a/server/src/test/java/org/opensearch/common/StreakTests.java b/server/src/test/java/org/opensearch/common/StreakTests.java new file mode 100644 index 0000000000000..80080ad3e4027 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/StreakTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.opensearch.common.util.Streak; +import org.opensearch.test.OpenSearchTestCase; + +public class StreakTests extends OpenSearchTestCase { + + public void testStreak() { + Streak streak = new Streak(); + + // Streak starts with zero. + assertEquals(0, streak.length()); + + // Streak increases on successive successful events. + streak.record(true); + assertEquals(1, streak.length()); + streak.record(true); + assertEquals(2, streak.length()); + streak.record(true); + assertEquals(3, streak.length()); + + // Streak resets to zero after an unsuccessful event. + streak.record(false); + assertEquals(0, streak.length()); + } +} diff --git a/server/src/test/java/org/opensearch/common/util/MovingAverageTests.java b/server/src/test/java/org/opensearch/common/util/MovingAverageTests.java new file mode 100644 index 0000000000000..415058992e081 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/MovingAverageTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.test.OpenSearchTestCase; + +public class MovingAverageTests extends OpenSearchTestCase { + + public void testMovingAverage() { + MovingAverage ma = new MovingAverage(5); + + // No observations + assertEquals(0.0, ma.getAverage(), 0.0); + assertEquals(0, ma.getCount()); + + // Not enough observations + ma.record(1); + ma.record(2); + ma.record(3); + assertEquals(2.0, ma.getAverage(), 0.0); + assertEquals(3, ma.getCount()); + assertFalse(ma.isReady()); + + // Enough observations + ma.record(4); + ma.record(5); + ma.record(6); + assertEquals(4, ma.getAverage(), 0.0); + assertEquals(6, ma.getCount()); + assertTrue(ma.isReady()); + } + + public void testMovingAverageWithZeroSize() { + try { + new MovingAverage(0); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("window size must be greater than zero")); + return; + } + + fail("exception should have been thrown"); + } +} diff --git a/server/src/test/java/org/opensearch/common/util/TokenBucketTests.java b/server/src/test/java/org/opensearch/common/util/TokenBucketTests.java new file mode 100644 index 0000000000000..a52e97cdd835c --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/TokenBucketTests.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongSupplier; + +public class TokenBucketTests extends OpenSearchTestCase { + + public void testTokenBucket() { + AtomicLong mockTimeNanos = new AtomicLong(); + LongSupplier mockTimeNanosSupplier = mockTimeNanos::get; + + // Token bucket that refills at 2 tokens/second and allows short bursts up to 3 operations. + TokenBucket tokenBucket = new TokenBucket(mockTimeNanosSupplier, 2.0 / TimeUnit.SECONDS.toNanos(1), 3); + + // Three operations succeed, fourth fails. 
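+ // (The bucket is created full at its burst capacity of 3 tokens, and each elapsed second refills 2 more, capped at 3; the assertions below follow directly from that arithmetic.)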
+ assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Clock moves ahead by one second. Two operations succeed, third fails. + mockTimeNanos.addAndGet(TimeUnit.SECONDS.toNanos(1)); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Clock moves ahead by half a second. One operation succeeds, second fails. + mockTimeNanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(500)); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Clock moves ahead by many seconds, but the token bucket should be capped at the 'burst' capacity. + mockTimeNanos.addAndGet(TimeUnit.SECONDS.toNanos(10)); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Ability to request fractional tokens. + mockTimeNanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(250)); + assertFalse(tokenBucket.request(1.0)); + assertTrue(tokenBucket.request(0.5)); + } + + public void testTokenBucketWithInvalidRate() { + try { + new TokenBucket(System::nanoTime, -1, 2); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("rate must be greater than zero")); + return; + } + + fail("exception should have been thrown"); + } + + public void testTokenBucketWithInvalidBurst() { + try { + new TokenBucket(System::nanoTime, 1, 0); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("burst must be greater than zero")); + return; + } + + fail("exception should have been thrown"); + } +} diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java new file mode 100644 index 0000000000000..ac5a8229718ba --- /dev/null +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.search.backpressure;
+
+import org.opensearch.action.search.SearchShardTask;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.search.backpressure.settings.SearchBackpressureSettings;
+import org.opensearch.search.backpressure.settings.SearchShardTaskSettings;
+import org.opensearch.search.backpressure.trackers.NodeDuressTracker;
+import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker;
+import org.opensearch.tasks.CancellableTask;
+import org.opensearch.tasks.Task;
+import org.opensearch.tasks.TaskCancellation;
+import org.opensearch.tasks.TaskResourceTrackingService;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.LongSupplier;
+
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats;
+
+public class SearchBackpressureServiceTests extends OpenSearchTestCase {
+
+ public void testIsNodeInDuress() {
+ TaskResourceTrackingService mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class);
+ ThreadPool mockThreadPool = mock(ThreadPool.class);
+
+ AtomicReference<Double> cpuUsage = new AtomicReference<>();
+ AtomicReference<Double> heapUsage = new AtomicReference<>();
+ NodeDuressTracker cpuUsageTracker = new NodeDuressTracker(() -> cpuUsage.get() >= 0.5);
+ NodeDuressTracker heapUsageTracker = new NodeDuressTracker(() -> heapUsage.get() >= 0.5);
+
+ SearchBackpressureSettings settings = new SearchBackpressureSettings(
+ Settings.EMPTY,
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+ );
+
+ SearchBackpressureService service = new SearchBackpressureService(
+ settings,
+ mockTaskResourceTrackingService,
+ mockThreadPool,
+ System::nanoTime,
+ List.of(cpuUsageTracker, heapUsageTracker),
+ Collections.emptyList()
+ );
+
+ // Node not in duress.
+ cpuUsage.set(0.0);
+ heapUsage.set(0.0);
+ assertFalse(service.isNodeInDuress());
+
+ // Node in duress; but not for many consecutive data points.
+ cpuUsage.set(1.0);
+ heapUsage.set(1.0);
+ assertFalse(service.isNodeInDuress());
+
+ // Node in duress for consecutive data points.
+ assertFalse(service.isNodeInDuress());
+ assertTrue(service.isNodeInDuress());
+
+ // Node not in duress anymore.
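+ // (A single healthy observation resets the tracker's streak, so the node
+ // must breach the thresholds on consecutive data points again before it
+ // is reported as being in duress.)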
+ cpuUsage.set(0.0);
+ heapUsage.set(0.0);
+ assertFalse(service.isNodeInDuress());
+ }
+
+ public void testTrackerStateUpdateOnTaskCompletion() {
+ TaskResourceTrackingService mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class);
+ ThreadPool mockThreadPool = mock(ThreadPool.class);
+ LongSupplier mockTimeNanosSupplier = () -> TimeUnit.SECONDS.toNanos(1234);
+ TaskResourceUsageTracker mockTaskResourceUsageTracker = mock(TaskResourceUsageTracker.class);
+
+ SearchBackpressureSettings settings = new SearchBackpressureSettings(
+ Settings.EMPTY,
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+ );
+
+ SearchBackpressureService service = new SearchBackpressureService(
+ settings,
+ mockTaskResourceTrackingService,
+ mockThreadPool,
+ mockTimeNanosSupplier,
+ Collections.emptyList(),
+ List.of(mockTaskResourceUsageTracker)
+ );
+
+ // Record task completions to update the tracker state. Tasks other than SearchShardTask are ignored.
+ service.onTaskCompleted(createMockTaskWithResourceStats(CancellableTask.class, 100, 200));
+ for (int i = 0; i < 100; i++) {
+ service.onTaskCompleted(createMockTaskWithResourceStats(SearchShardTask.class, 100, 200));
+ }
+ assertEquals(100, service.getState().getCompletionCount());
+ verify(mockTaskResourceUsageTracker, times(100)).update(any());
+ }
+
+ public void testInFlightCancellation() {
+ TaskResourceTrackingService mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class);
+ ThreadPool mockThreadPool = mock(ThreadPool.class);
+ AtomicLong mockTime = new AtomicLong(0);
+ LongSupplier mockTimeNanosSupplier = mockTime::get;
+ NodeDuressTracker mockNodeDuressTracker = new NodeDuressTracker(() -> true);
+
+ TaskResourceUsageTracker mockTaskResourceUsageTracker = new TaskResourceUsageTracker() {
+ @Override
+ public String name() {
+ return "mock_tracker";
+ }
+
+ @Override
+ public void update(Task task) {}
+
+ @Override
+ public Optional<TaskCancellation.Reason> cancellationReason(Task task) {
+ if (task.getTotalResourceStats().getCpuTimeInNanos() < 300) {
+ return Optional.empty();
+ }
+
+ return Optional.of(new TaskCancellation.Reason("limits exceeded", 5));
+ }
+ };
+
+ // Mocking 'settings' with predictable rate limiting thresholds.
+ SearchBackpressureSettings settings = spy(
+ new SearchBackpressureSettings(
+ Settings.builder()
+ .put(SearchBackpressureSettings.SETTING_ENFORCED.getKey(), true)
+ .put(SearchBackpressureSettings.SETTING_CANCELLATION_RATIO.getKey(), 0.1)
+ .put(SearchBackpressureSettings.SETTING_CANCELLATION_RATE.getKey(), 0.003)
+ .put(SearchBackpressureSettings.SETTING_CANCELLATION_BURST.getKey(), 10.0)
+ .build(),
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+ )
+ );
+
+ SearchBackpressureService service = new SearchBackpressureService(
+ settings,
+ mockTaskResourceTrackingService,
+ mockThreadPool,
+ mockTimeNanosSupplier,
+ List.of(mockNodeDuressTracker),
+ List.of(mockTaskResourceUsageTracker)
+ );
+
+ // Run two iterations so that node is marked 'in duress' from the third iteration onwards.
+ service.doRun();
+ service.doRun();
+
+ // Mocking 'settings' with predictable searchHeapThresholdBytes so that cancellation logic doesn't get skipped.
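+ // (Assumption behind this mock: the service compares the heap usage of
+ // tracked search shard tasks against this threshold before it considers
+ // cancellation, so pinning both values to 500 bytes keeps that pre-check
+ // deterministic in this test.)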
+ long taskHeapUsageBytes = 500;
+ SearchShardTaskSettings shardTaskSettings = mock(SearchShardTaskSettings.class);
+ when(shardTaskSettings.getTotalHeapThresholdBytes()).thenReturn(taskHeapUsageBytes);
+ when(settings.getSearchShardTaskSettings()).thenReturn(shardTaskSettings);
+
+ // Create a mix of low and high resource usage tasks (60 low + 15 high resource usage tasks).
+ Map<Long, Task> activeTasks = new HashMap<>();
+ for (long i = 0; i < 75; i++) {
+ if (i % 5 == 0) {
+ activeTasks.put(i, createMockTaskWithResourceStats(SearchShardTask.class, 500, taskHeapUsageBytes));
+ } else {
+ activeTasks.put(i, createMockTaskWithResourceStats(SearchShardTask.class, 100, taskHeapUsageBytes));
+ }
+ }
+ doReturn(activeTasks).when(mockTaskResourceTrackingService).getResourceAwareTasks();
+
+ // There are 15 tasks eligible for cancellation but only 10 will be cancelled (burst limit).
+ service.doRun();
+ assertEquals(10, service.getState().getCancellationCount());
+ assertEquals(1, service.getState().getLimitReachedCount());
+
+ // If the clock or completed task count haven't made sufficient progress, we'll continue to be rate-limited.
+ service.doRun();
+ assertEquals(10, service.getState().getCancellationCount());
+ assertEquals(2, service.getState().getLimitReachedCount());
+
+ // Simulate task completion to replenish some tokens.
+ // This will add 2 tokens (task count delta * cancellationRatio) to 'rateLimitPerTaskCompletion'.
+ for (int i = 0; i < 20; i++) {
+ service.onTaskCompleted(createMockTaskWithResourceStats(SearchShardTask.class, 100, taskHeapUsageBytes));
+ }
+ service.doRun();
+ assertEquals(12, service.getState().getCancellationCount());
+ assertEquals(3, service.getState().getLimitReachedCount());
+
+ // Fast-forward the clock by one second to replenish some tokens.
+ // This will add 3 tokens (time delta * rate) to 'rateLimitPerTime'.
+ mockTime.addAndGet(TimeUnit.SECONDS.toNanos(1));
+ service.doRun();
+ assertEquals(15, service.getState().getCancellationCount());
+ assertEquals(3, service.getState().getLimitReachedCount()); // no more tasks to cancel; limit not reached
+ }
+}
diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java
new file mode 100644
index 0000000000000..472ba95566523
--- /dev/null
+++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.backpressure.trackers;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+public class NodeDuressTrackerTests extends OpenSearchTestCase {
+
+ public void testNodeDuressTracker() {
+ AtomicReference<Double> cpuUsage = new AtomicReference<>(0.0);
+ NodeDuressTracker tracker = new NodeDuressTracker(() -> cpuUsage.get() >= 0.5);
+
+ // Node not in duress.
+ assertEquals(0, tracker.check());
+
+ // Node in duress; the streak must keep increasing.
+ cpuUsage.set(0.7);
+ assertEquals(1, tracker.check());
+ assertEquals(2, tracker.check());
+ assertEquals(3, tracker.check());
+
+ // Node not in duress anymore.
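+ // (check() returns the current streak length; a single data point below
+ // the threshold resets it to zero.)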
+ cpuUsage.set(0.3);
+ assertEquals(0, tracker.check());
+ assertEquals(0, tracker.check());
+ }
+}
diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java
new file mode 100644
index 0000000000000..d6a82702e074d
--- /dev/null
+++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+import org.opensearch.action.search.SearchShardTask;
+import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+
+public class TaskCancellationTests extends OpenSearchTestCase {
+
+ public void testTaskCancellation() {
+ SearchShardTask mockTask = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap());
+
+ TaskResourceUsageTracker mockTracker1 = createMockTaskResourceUsageTracker("mock_tracker_1");
+ TaskResourceUsageTracker mockTracker2 = createMockTaskResourceUsageTracker("mock_tracker_2");
+ TaskResourceUsageTracker mockTracker3 = createMockTaskResourceUsageTracker("mock_tracker_3");
+
+ List<TaskCancellation.Reason> reasons = new ArrayList<>();
+ List<Runnable> callbacks = List.of(mockTracker1::incrementCancellations, mockTracker2::incrementCancellations);
+ TaskCancellation taskCancellation = new TaskCancellation(mockTask, reasons, callbacks);
+
+ // Task does not have any reason to be cancelled.
+ assertEquals(0, taskCancellation.totalCancellationScore());
+ assertFalse(taskCancellation.isEligibleForCancellation());
+ taskCancellation.cancel();
+ assertEquals(0, mockTracker1.getCancellations());
+ assertEquals(0, mockTracker2.getCancellations());
+ assertEquals(0, mockTracker3.getCancellations());
+
+ // Task has one or more reasons to be cancelled.
+ reasons.add(new TaskCancellation.Reason("limits exceeded 1", 10));
+ reasons.add(new TaskCancellation.Reason("limits exceeded 2", 20));
+ reasons.add(new TaskCancellation.Reason("limits exceeded 3", 5));
+ assertEquals(35, taskCancellation.totalCancellationScore());
+ assertTrue(taskCancellation.isEligibleForCancellation());
+
+ // Cancel the task and validate the cancellation reason and invocation of callbacks.
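+ // (The cancellation message concatenates every reason; only the two
+ // registered callbacks are invoked, so mockTracker3's count stays zero.)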
+ taskCancellation.cancel();
+ assertTrue(mockTask.getReasonCancelled().contains("limits exceeded 1, limits exceeded 2, limits exceeded 3"));
+ assertEquals(1, mockTracker1.getCancellations());
+ assertEquals(1, mockTracker2.getCancellations());
+ assertEquals(0, mockTracker3.getCancellations());
+ }
+
+ private static TaskResourceUsageTracker createMockTaskResourceUsageTracker(String name) {
+ return new TaskResourceUsageTracker() {
+ @Override
+ public String name() {
+ return name;
+ }
+
+ @Override
+ public void update(Task task) {}
+
+ @Override
+ public Optional<TaskCancellation.Reason> cancellationReason(Task task) {
+ return Optional.empty();
+ }
+ };
+ }
+}
diff --git a/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java b/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java
new file mode 100644
index 0000000000000..ba3653d0b4a84
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.backpressure;
+
+import org.opensearch.tasks.CancellableTask;
+import org.opensearch.tasks.TaskResourceUsage;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SearchBackpressureTestHelpers extends OpenSearchTestCase {
+
+ public static <T extends CancellableTask> T createMockTaskWithResourceStats(Class<T> type, long cpuUsage, long heapUsage) {
+ return createMockTaskWithResourceStats(type, cpuUsage, heapUsage, 0);
+ }
+
+ public static <T extends CancellableTask> T createMockTaskWithResourceStats(
+ Class<T> type,
+ long cpuUsage,
+ long heapUsage,
+ long startTimeNanos
+ ) {
+ T task = mock(type);
+ when(task.getTotalResourceStats()).thenReturn(new TaskResourceUsage(cpuUsage, heapUsage));
+ when(task.getStartTimeNanos()).thenReturn(startTimeNanos);
+
+ AtomicBoolean isCancelled = new AtomicBoolean(false);
+ doAnswer(invocation -> {
+ isCancelled.set(true);
+ return null;
+ }).when(task).cancel(anyString());
+ doAnswer(invocation -> isCancelled.get()).when(task).isCancelled();
+
+ return task;
+ }
+}
From f1995b951abab34935bf8e5dee91097efbf5503e Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Fri, 14 Oct 2022 09:37:02 -0700
Subject: [PATCH 28/39] Bump Tika from 2.4.0 to 2.5.0 addressing CVE-2022-33879. (#4791)

* Bump Tika from 2.4.0 to 2.5.0 addressing CVE-2022-33879.

Signed-off-by: Marc Handalian

* Add missing SHAs.

Signed-off-by: Marc Handalian

* Update changelog with PR info.
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 | 1 - plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 | 1 + .../licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 | 1 - .../licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 | 1 + .../licenses/tika-parsers-standard-package-2.4.0.jar.sha1 | 1 - .../licenses/tika-parsers-standard-package-2.5.0.jar.sha1 | 1 + 8 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 15dedbde22d41..cfbbe01ae8494 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) +- Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 8f952f7619ac1..7bf67769cda10 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } versions << [ - 'tika' : '2.4.0', + 'tika' : '2.5.0', 'pdfbox': '2.0.25', 'poi' : '5.2.2', 'mime4j': '0.8.3' diff --git a/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 deleted file mode 100644 index 373b7ec63138a..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97b2454943127857a8304319be658d6d7ff4fff1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..419f01c631375 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 @@ -0,0 +1 @@ +7f9f35e4827726b062ac2b0ad0fd361837a50ac9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 deleted file mode 100644 index cf724f4ee1de4..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-57901d6088b0e34999e25af6b363ccec959b5e61 \ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1
new file mode 100644
index 0000000000000..a9e47ff8a8a86
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1
@@ -0,0 +1 @@
+649574dca8f19d991ac25894c40284446dc5cf50 \ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1
deleted file mode 100644
index ec03a055a6f6d..0000000000000
--- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-83522360364a93e819eaec74f393bc56ed1d466a \ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1
new file mode 100644
index 0000000000000..d648183868034
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1
@@ -0,0 +1 @@
+2b9268511c34d8a1098f0565438cb8077fcf845d \ No newline at end of file
From e44158d4d10d4f8905895ffa50bf9398b8550667 Mon Sep 17 00:00:00 2001
From: Leonidas Spyropoulos
Date: Fri, 14 Oct 2022 17:52:59 +0100
Subject: [PATCH 29/39] Apply reproducible builds to plugins (#4746)

As per the Gradle [docs], add support to remove timestamps and to package
files in a consistent order, which is required for [reproducible] builds.
This is a result of the discussion in
https://github.com/opensearch-project/opensearch-plugin-template-java/pull/38
and applies to all plugins going forward.

[docs]: https://docs.gradle.org/current/userguide/working_with_files.html#sec:reproducible_archives
[reproducible]: https://reproducible-builds.org/

Signed-off-by: Leonidas Spyropoulos

Signed-off-by: Leonidas Spyropoulos
Co-authored-by: Daniel (dB.) Doubrovkine
---
 CHANGELOG.md | 1 +
 .../org/opensearch/gradle/plugin/PluginBuildPlugin.groovy | 8 +++++++-
 .../opensearch/gradle/plugin/PluginBuildPluginTests.java | 5 +++++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index cfbbe01ae8494..963ccdcdc9a6b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680))
- Recommissioning of zone. REST layer support.
([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) +- Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 31677965ab0d3..b7c78991a0da3 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -29,13 +29,13 @@ package org.opensearch.gradle.plugin import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import org.gradle.api.tasks.bundling.AbstractArchiveTask import org.opensearch.gradle.BuildPlugin import org.opensearch.gradle.NoticeTask import org.opensearch.gradle.Version import org.opensearch.gradle.VersionProperties import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin import org.opensearch.gradle.info.BuildParams -import org.opensearch.gradle.plugin.PluginPropertiesExtension import org.opensearch.gradle.test.RestTestBasePlugin import org.opensearch.gradle.testclusters.RunTask import org.opensearch.gradle.util.Util @@ -134,6 +134,12 @@ class PluginBuildPlugin implements Plugin { } project.configurations.getByName('default') .extendsFrom(project.configurations.getByName('runtimeClasspath')) + project.tasks.withType(AbstractArchiveTask.class).configureEach { task -> + // ignore file timestamps + // be consistent in archive file order + task.preserveFileTimestamps = false + task.reproducibleFileOrder = true + } // allow running ES with this plugin in the foreground of a build project.tasks.register('run', RunTask) { dependsOn(project.tasks.bundlePlugin) diff --git a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java index 9ed0e3e494992..8772a9fbd65ee 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java @@ -31,6 +31,7 @@ package org.opensearch.gradle.plugin; +import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.opensearch.gradle.BwcVersions; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; @@ -64,6 +65,10 @@ public void testApply() { assertNotNull("plugin extensions has the right type", project.getExtensions().findByType(PluginPropertiesExtension.class)); assertNull("plugin should not create the integTest task", project.getTasks().findByName("integTest")); + project.getTasks().withType(AbstractArchiveTask.class).forEach(t -> { + assertFalse(String.format("task '%s' should not preserve timestamps", t.getName()), t.isPreserveFileTimestamps()); + assertTrue(String.format("task '%s' should have reproducible file order", t.getName()), t.isReproducibleFileOrder()); + }); } @Ignore("https://github.com/elastic/elasticsearch/issues/47123") From 6ac0c7cd2054da9a6492c76011d1dcd2e0f518b8 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Mon, 17 Oct 2022 14:51:56 -0700 Subject: [PATCH 30/39] Fix recovery path for searchable snapshots (#4813) Replicas 
are bootstrapped using the recovery path, as opposed to the restore path used for creating the primary shard. This has been broken in the initial implementation of searchable snapshots. The fix here is to put in the appropriate checks to avoid failing during recovery. I've also updated the integration test to ensure the recovery path is always exercised during testing. Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../opensearch/snapshots/SearchableSnapshotIT.java | 13 +++++++++++-- .../indices/recovery/PeerRecoveryTargetService.java | 9 ++++++--- .../opensearch/indices/recovery/RecoveryTarget.java | 11 +++++++---- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 963ccdcdc9a6b..25efcc838553a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -137,6 +137,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) +- Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 96fcf0053c9ab..0ab025bb575cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -49,15 +49,24 @@ protected boolean addMockInternalEngine() { } public void testCreateSearchableSnapshot() throws Exception { + final int numReplicasIndex1 = randomIntBetween(1, 4); + final int numReplicasIndex2 = randomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); final Client client = client(); createRepository("test-repo", "fs"); createIndex( "test-idx-1", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex1)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .build() ); createIndex( "test-idx-2", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex2)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .build() ); ensureGreen(); indexRandomDocs("test-idx-1", 100); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index b5702431ed4bf..556e4db3400e1 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -54,6 +54,7 @@ import 
org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.mapper.MapperException; @@ -244,8 +245,10 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node"; logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); - boolean remoteTranslogEnabled = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); - final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!remoteTranslogEnabled); + final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); + final boolean hasNoTranslog = IndexModule.Type.REMOTE_SNAPSHOT.match(indexShard.indexSettings()); + final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog) == false; + final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog); assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]"; startRequest = getStartRecoveryRequest( @@ -253,7 +256,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi clusterService.localNode(), recoveryTarget, startingSeqNo, - !remoteTranslogEnabled + verifyTranslog ); requestToSend = startRequest; actionName = PeerRecoverySourceService.Actions.START_RECOVERY; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index c1e29e0d866d8..b6122dbeeea09 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -44,6 +44,7 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.index.IndexModule; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.seqno.ReplicationTracker; @@ -355,10 +356,12 @@ public void cleanFiles( try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetadata); - // If Segment Replication is enabled, we need to reuse the primary's translog UUID already stored in the index. - // With Segrep, replicas should never create their own commit points. This ensures the index and xlog share the same - // UUID without the extra step to associate the index with a new xlog. - if (indexShard.indexSettings().isSegRepEnabled()) { + // Replicas for segment replication or remote snapshot indices do not create + // their own commit points and therefore do not modify the commit user data + // in their store. In these cases, reuse the primary's translog UUID. 
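+ // (The reused UUID keeps the freshly created empty translog below
+ // associated with the index commit already present in the store.)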
+ final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabled() + || IndexModule.Type.REMOTE_SNAPSHOT.match(indexShard.indexSettings()); + if (reuseTranslogUUID) { final String translogUUID = store.getMetadata().getCommitUserData().get(TRANSLOG_UUID_KEY); Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), From 1d654851ff2ea50b0fbb07b7602945f3a0e4cc88 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Mon, 17 Oct 2022 16:20:47 -0700 Subject: [PATCH 31/39] Refactor BalancedAllocator.Balancer to LocalShardsBalancer (#4761) * Refactor BalancedAllocator.Balancer to LocalShardsBalancer Signed-off-by: Kunal Kotwani * Deprecate Balancer to maintain BWC Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- CHANGELOG.md | 1 + .../allocation/AllocationConstraints.java | 7 +- .../allocator/BalancedShardsAllocator.java | 967 +----------------- .../allocator/LocalShardsBalancer.java | 967 ++++++++++++++++++ .../allocation/allocator/ShardsBalancer.java | 75 ++ .../AllocationConstraintsTests.java | 4 +- .../allocation/BalancedSingleShardTests.java | 4 +- 7 files changed, 1083 insertions(+), 942 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 25efcc838553a..4f6e5425f37db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -86,6 +86,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) +- Refactored BalancedAllocator.Balancer to LocalShardsBalancer ([#4761](https://github.com/opensearch-project/OpenSearch/pull/4761)) ### Deprecated ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index 8c2c85ce107a6..3d9847ca35931 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -6,6 +6,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; import java.util.ArrayList; import java.util.List; @@ -27,11 +28,11 @@ public AllocationConstraints() { } class ConstraintParams { - private BalancedShardsAllocator.Balancer balancer; + private ShardsBalancer balancer; private BalancedShardsAllocator.ModelNode node; private String index; - ConstraintParams(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, String index) { + ConstraintParams(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { this.balancer = balancer; this.node = node; 
this.index = index; @@ -50,7 +51,7 @@ class ConstraintParams { * This weight function is used only in case of unassigned shards to avoid overloading a newly added node. * Weight calculation in other scenarios like shard movement and re-balancing remain unaffected by this function. */ - public long weight(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, String index) { + public long weight(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { int constraintsBreached = 0; ConstraintParams params = new ConstraintParams(balancer, node, index); for (Predicate predicate : constraintPredicates) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 181910e3ac1c4..42c8f7987bf3d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -34,47 +34,28 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationConstraints; -import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.MoveDecision; -import org.opensearch.cluster.routing.allocation.NodeAllocationResult; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; -import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.opensearch.cluster.routing.allocation.decider.Decision; -import org.opensearch.cluster.routing.allocation.decider.Decision.Type; -import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.gateway.PriorityComparator; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.StreamSupport; - -import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; /** * The {@link BalancedShardsAllocator} re-balances the nodes allocations @@ -160,23 +141,23 @@ public void allocate(RoutingAllocation allocation) { failAllocationOfNewPrimaries(allocation); return; } - final Balancer balancer = new Balancer(logger, allocation, movePrimaryFirst, 
weightFunction, threshold);
- balancer.allocateUnassigned();
- balancer.moveShards();
- balancer.balance();
+ final ShardsBalancer localShardsBalancer = new LocalShardsBalancer(logger, allocation, movePrimaryFirst, weightFunction, threshold);
+ localShardsBalancer.allocateUnassigned();
+ localShardsBalancer.moveShards();
+ localShardsBalancer.balance();
}
@Override
public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) {
- Balancer balancer = new Balancer(logger, allocation, movePrimaryFirst, weightFunction, threshold);
+ ShardsBalancer localShardsBalancer = new LocalShardsBalancer(logger, allocation, movePrimaryFirst, weightFunction, threshold);
AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN;
MoveDecision moveDecision = MoveDecision.NOT_TAKEN;
if (shard.unassigned()) {
- allocateUnassignedDecision = balancer.decideAllocateUnassigned(shard);
+ allocateUnassignedDecision = localShardsBalancer.decideAllocateUnassigned(shard);
} else {
- moveDecision = balancer.decideMove(shard);
+ moveDecision = localShardsBalancer.decideMove(shard);
if (moveDecision.isDecisionTaken() && moveDecision.canRemain()) {
- MoveDecision rebalanceDecision = balancer.decideRebalance(shard);
+ MoveDecision rebalanceDecision = localShardsBalancer.decideRebalance(shard);
moveDecision = rebalanceDecision.withRemainDecision(moveDecision.getCanRemainDecision());
}
}
@@ -277,923 +258,18 @@ static class WeightFunction {
this.constraints = new AllocationConstraints();
}
- public float weightWithAllocationConstraints(Balancer balancer, ModelNode node, String index) {
+ public float weightWithAllocationConstraints(ShardsBalancer balancer, ModelNode node, String index) {
float balancerWeight = weight(balancer, node, index);
return balancerWeight + constraints.weight(balancer, node, index);
}
- float weight(Balancer balancer, ModelNode node, String index) {
+ float weight(ShardsBalancer balancer, ModelNode node, String index) {
final float weightShard = node.numShards() - balancer.avgShardsPerNode();
final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index);
return theta0 * weightShard + theta1 * weightIndex;
}
}
- /**
- * A {@link Balancer}
- *
- * @opensearch.internal
- */
- public static class Balancer {
- private final Logger logger;
- private final Map<String, ModelNode> nodes;
- private final RoutingAllocation allocation;
- private final RoutingNodes routingNodes;
- private final boolean movePrimaryFirst;
- private final WeightFunction weight;
-
- private final float threshold;
- private final Metadata metadata;
- private final float avgShardsPerNode;
- private final NodeSorter sorter;
- private final Set<RoutingNode> inEligibleTargetNode;
-
- public Balancer(Logger logger, RoutingAllocation allocation, boolean movePrimaryFirst, WeightFunction weight, float threshold) {
- this.logger = logger;
- this.allocation = allocation;
- this.movePrimaryFirst = movePrimaryFirst;
- this.weight = weight;
- this.threshold = threshold;
- this.routingNodes = allocation.routingNodes();
- this.metadata = allocation.metadata();
- avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size();
- nodes = Collections.unmodifiableMap(buildModelFromAssigned());
- sorter = newNodeSorter();
- inEligibleTargetNode = new HashSet<>();
- }
-
- /**
- * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list.
- */ - private ModelNode[] nodesArray() { - return nodes.values().toArray(new ModelNode[nodes.size()]); - } - - /** - * Returns the average of shards per node for the given index - */ - public float avgShardsPerNode(String index) { - return ((float) metadata.index(index).getTotalNumberOfShards()) / nodes.size(); - } - - /** - * Returns the global average of shards per node - */ - public float avgShardsPerNode() { - return avgShardsPerNode; - } - - /** - * Returns a new {@link NodeSorter} that sorts the nodes based on their - * current weight with respect to the index passed to the sorter. The - * returned sorter is not sorted. Use {@link NodeSorter#reset(String)} - * to sort based on an index. - */ - private NodeSorter newNodeSorter() { - return new NodeSorter(nodesArray(), weight, this); - } - - /** - * The absolute value difference between two weights. - */ - private static float absDelta(float lower, float higher) { - assert higher >= lower : higher + " lt " + lower + " but was expected to be gte"; - return Math.abs(higher - lower); - } - - /** - * Returns {@code true} iff the weight delta between two nodes is under a defined threshold. - * See {@link #THRESHOLD_SETTING} for defining the threshold. - */ - private static boolean lessThan(float delta, float threshold) { - /* deltas close to the threshold are "rounded" to the threshold manually - to prevent floating point problems if the delta is very close to the - threshold ie. 1.000000002 which can trigger unnecessary balance actions*/ - return delta <= (threshold + 0.001f); - } - - /** - * Balances the nodes on the cluster model according to the weight function. - * The actual balancing is delegated to {@link #balanceByWeights()} - */ - private void balance() { - if (logger.isTraceEnabled()) { - logger.trace("Start balancing cluster"); - } - if (allocation.hasPendingAsyncFetch()) { - /* - * see https://github.com/elastic/elasticsearch/issues/14387 - * if we allow rebalance operations while we are still fetching shard store data - * we might end up with unnecessary rebalance operations which can be super confusion/frustrating - * since once the fetches come back we might just move all the shards back again. - * Therefore we only do a rebalance if we have fetched all information. - */ - logger.debug("skipping rebalance due to in-flight shard/store fetches"); - return; - } - if (allocation.deciders().canRebalance(allocation).type() != Type.YES) { - logger.trace("skipping rebalance as it is disabled"); - return; - } - if (nodes.size() < 2) { /* skip if we only have one node */ - logger.trace("skipping rebalance as single node only"); - return; - } - balanceByWeights(); - } - - /** - * Makes a decision about moving a single shard to a different node to form a more - * optimally balanced cluster. This method is invoked from the cluster allocation - * explain API only. 
- */
- private MoveDecision decideRebalance(final ShardRouting shard) {
- if (shard.started() == false) {
- // we can only rebalance started shards
- return MoveDecision.NOT_TAKEN;
- }
-
- Decision canRebalance = allocation.deciders().canRebalance(shard, allocation);
-
- sorter.reset(shard.getIndexName());
- ModelNode[] modelNodes = sorter.modelNodes;
- final String currentNodeId = shard.currentNodeId();
- // find currently assigned node
- ModelNode currentNode = null;
- for (ModelNode node : modelNodes) {
- if (node.getNodeId().equals(currentNodeId)) {
- currentNode = node;
- break;
- }
- }
- assert currentNode != null : "currently assigned node could not be found";
-
- // balance the shard, if a better node can be found
- final String idxName = shard.getIndexName();
- final float currentWeight = weight.weight(this, currentNode, idxName);
- final AllocationDeciders deciders = allocation.deciders();
- Type rebalanceDecisionType = Type.NO;
- ModelNode assignedNode = null;
- List<Tuple<ModelNode, Decision>> betterBalanceNodes = new ArrayList<>();
- List<Tuple<ModelNode, Decision>> sameBalanceNodes = new ArrayList<>();
- List<Tuple<ModelNode, Decision>> worseBalanceNodes = new ArrayList<>();
- for (ModelNode node : modelNodes) {
- if (node == currentNode) {
- continue; // skip over node we're currently allocated to
- }
- final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation);
- // the current weight of the node in the cluster, as computed by the weight function;
- // this is a comparison of the number of shards on this node to the number of shards
- // that should be on each node on average (both taking the cluster as a whole into account
- // as well as shards per index)
- final float nodeWeight = weight.weight(this, node, idxName);
- // if the node we are examining has a worse (higher) weight than the node the shard is
- // assigned to, then there is no way moving the shard to the node with the worse weight
- // can make the balance of the cluster better, so we check for that here
- final boolean betterWeightThanCurrent = nodeWeight <= currentWeight;
- boolean rebalanceConditionsMet = false;
- if (betterWeightThanCurrent) {
- // get the delta between the weights of the node we are checking and the node that holds the shard
- float currentDelta = absDelta(nodeWeight, currentWeight);
- // checks if the weight delta is above a certain threshold; if it is not above a certain threshold,
- // then even though the node we are examining has a better weight and may make the cluster balance
- // more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless
- // the gains make it worth it, as defined by the threshold
- boolean deltaAboveThreshold = lessThan(currentDelta, threshold) == false;
- // calculate the delta of the weights of the two nodes if we were to add the shard to the
- // node in question and move it away from the node that currently holds it.
- // hence we add 2.0f to the weight delta
- float proposedDelta = 2.0f + nodeWeight - currentWeight;
- boolean betterWeightWithShardAdded = proposedDelta < currentDelta;
-
- rebalanceConditionsMet = deltaAboveThreshold && betterWeightWithShardAdded;
- // if the simulated weight delta with the shard moved away is better than the weight delta
- // with the shard remaining on the current node, and we are allowed to allocate to the
- // node in question, then allow the rebalance
- if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) {
- // rebalance to the node, only will get overwritten if the decision here is to
- // THROTTLE and we get a decision with YES on another node
- rebalanceDecisionType = canAllocate.type();
- assignedNode = node;
- }
- }
- Tuple<ModelNode, Decision> nodeResult = Tuple.tuple(node, canAllocate);
- if (rebalanceConditionsMet) {
- betterBalanceNodes.add(nodeResult);
- } else if (betterWeightThanCurrent) {
- sameBalanceNodes.add(nodeResult);
- } else {
- worseBalanceNodes.add(nodeResult);
- }
- }
-
- int weightRanking = 0;
- List<NodeAllocationResult> nodeDecisions = new ArrayList<>(modelNodes.length - 1);
- for (Tuple<ModelNode, Decision> result : betterBalanceNodes) {
- nodeDecisions.add(
- new NodeAllocationResult(
- result.v1().routingNode.node(),
- AllocationDecision.fromDecisionType(result.v2().type()),
- result.v2(),
- ++weightRanking
- )
- );
- }
- int currentNodeWeightRanking = ++weightRanking;
- for (Tuple<ModelNode, Decision> result : sameBalanceNodes) {
- AllocationDecision nodeDecision = result.v2().type() == Type.NO ? AllocationDecision.NO : AllocationDecision.WORSE_BALANCE;
- nodeDecisions.add(
- new NodeAllocationResult(result.v1().routingNode.node(), nodeDecision, result.v2(), currentNodeWeightRanking)
- );
- }
- for (Tuple<ModelNode, Decision> result : worseBalanceNodes) {
- AllocationDecision nodeDecision = result.v2().type() == Type.NO ? AllocationDecision.NO : AllocationDecision.WORSE_BALANCE;
- nodeDecisions.add(new NodeAllocationResult(result.v1().routingNode.node(), nodeDecision, result.v2(), ++weightRanking));
- }
-
- if (canRebalance.type() != Type.YES || allocation.hasPendingAsyncFetch()) {
- AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch()
- ? AllocationDecision.AWAITING_INFO
- : AllocationDecision.fromDecisionType(canRebalance.type());
- return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions);
- } else {
- return MoveDecision.rebalance(
- canRebalance,
- AllocationDecision.fromDecisionType(rebalanceDecisionType),
- assignedNode != null ? assignedNode.routingNode.node() : null,
- currentNodeWeightRanking,
- nodeDecisions
- );
- }
- }
-
- /**
- * Balances the nodes on the cluster model according to the weight
- * function. The configured threshold is the minimum delta between the
- * weight of the maximum node and the minimum node according to the
- * {@link WeightFunction}. This weight is calculated per index to
- * distribute shards evenly per index. The balancer tries to relocate
- * shards only if the delta exceeds the threshold.
In the default case - * the threshold is set to {@code 1.0} to enforce gaining relocation - * only, or in other words relocations that move the weight delta closer - * to {@code 0.0} - */ - private void balanceByWeights() { - final AllocationDeciders deciders = allocation.deciders(); - final ModelNode[] modelNodes = sorter.modelNodes; - final float[] weights = sorter.weights; - for (String index : buildWeightOrderedIndices()) { - IndexMetadata indexMetadata = metadata.index(index); - - // find nodes that have a shard of this index or where shards of this index are allowed to be allocated to, - // move these nodes to the front of modelNodes so that we can only balance based on these nodes - int relevantNodes = 0; - for (int i = 0; i < modelNodes.length; i++) { - ModelNode modelNode = modelNodes[i]; - if (modelNode.getIndex(index) != null - || deciders.canAllocate(indexMetadata, modelNode.getRoutingNode(), allocation).type() != Type.NO) { - // swap nodes at position i and relevantNodes - modelNodes[i] = modelNodes[relevantNodes]; - modelNodes[relevantNodes] = modelNode; - relevantNodes++; - } - } - - if (relevantNodes < 2) { - continue; - } - - sorter.reset(index, 0, relevantNodes); - int lowIdx = 0; - int highIdx = relevantNodes - 1; - while (true) { - final ModelNode minNode = modelNodes[lowIdx]; - final ModelNode maxNode = modelNodes[highIdx]; - advance_range: if (maxNode.numShards(index) > 0) { - final float delta = absDelta(weights[lowIdx], weights[highIdx]); - if (lessThan(delta, threshold)) { - if (lowIdx > 0 - && highIdx - 1 > 0 // is there a chance for a higher delta? - && (absDelta(weights[0], weights[highIdx - 1]) > threshold) // check if we need to break at all - ) { - /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible - * due to some allocation decider restrictions like zone awareness. if one zone has for instance - * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we - * can't move to the "lighter" shards since otherwise the zone would go over capacity. - * - * This break jumps straight to the condition below were we start moving from the high index towards - * the low index to shrink the window we are considering for balance from the other direction. - * (check shrinking the window from MAX to MIN) - * See #3580 - */ - break advance_range; - } - if (logger.isTraceEnabled()) { - logger.trace( - "Stop balancing index [{}] min_node [{}] weight: [{}]" + " max_node [{}] weight: [{}] delta: [{}]", - index, - maxNode.getNodeId(), - weights[highIdx], - minNode.getNodeId(), - weights[lowIdx], - delta - ); - } - break; - } - if (logger.isTraceEnabled()) { - logger.trace( - "Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", - maxNode.getNodeId(), - weights[highIdx], - minNode.getNodeId(), - weights[lowIdx], - delta - ); - } - if (delta <= 1.0f) { - /* - * prevent relocations that only swap the weights of the two nodes. a relocation must bring us closer to the - * balance if we only achieve the same delta the relocation is useless - * - * NB this comment above was preserved from an earlier version but doesn't obviously describe the code today. We - * already know that lessThan(delta, threshold) == false and threshold defaults to 1.0, so by default we never - * hit this case anyway. 
- */ - logger.trace( - "Couldn't find shard to relocate from node [{}] to node [{}]", - maxNode.getNodeId(), - minNode.getNodeId() - ); - } else if (tryRelocateShard(minNode, maxNode, index)) { - /* - * TODO we could be a bit smarter here, we don't need to fully sort necessarily - * we could just find the place to insert linearly but the win might be minor - * compared to the added complexity - */ - weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); - weights[highIdx] = sorter.weight(modelNodes[highIdx]); - sorter.sort(0, relevantNodes); - lowIdx = 0; - highIdx = relevantNodes - 1; - continue; - } - } - if (lowIdx < highIdx - 1) { - /* Shrinking the window from MIN to MAX - * we can't move from any shard from the min node lets move on to the next node - * and see if the threshold still holds. We either don't have any shard of this - * index on this node of allocation deciders prevent any relocation.*/ - lowIdx++; - } else if (lowIdx > 0) { - /* Shrinking the window from MAX to MIN - * now we go max to min since obviously we can't move anything to the max node - * lets pick the next highest */ - lowIdx = 0; - highIdx--; - } else { - /* we are done here, we either can't relocate anymore or we are balanced */ - break; - } - } - } - } - - /** - * This builds a initial index ordering where the indices are returned - * in most unbalanced first. We need this in order to prevent over - * allocations on added nodes from one index when the weight parameters - * for global balance overrule the index balance at an intermediate - * state. For example this can happen if we have 3 nodes and 3 indices - * with 3 primary and 1 replica shards. At the first stage all three nodes hold - * 2 shard for each index. Now we add another node and the first index - * is balanced moving three shards from two of the nodes over to the new node since it - * has no shards yet and global balance for the node is way below - * average. To re-balance we need to move shards back eventually likely - * to the nodes we relocated them from. 
- */ - private String[] buildWeightOrderedIndices() { - final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); - final float[] deltas = new float[indices.length]; - for (int i = 0; i < deltas.length; i++) { - sorter.reset(indices[i]); - deltas[i] = sorter.delta(); - } - new IntroSorter() { - - float pivotWeight; - - @Override - protected void swap(int i, int j) { - final String tmpIdx = indices[i]; - indices[i] = indices[j]; - indices[j] = tmpIdx; - final float tmpDelta = deltas[i]; - deltas[i] = deltas[j]; - deltas[j] = tmpDelta; - } - - @Override - protected int compare(int i, int j) { - return Float.compare(deltas[j], deltas[i]); - } - - @Override - protected void setPivot(int i) { - pivotWeight = deltas[i]; - } - - @Override - protected int comparePivot(int j) { - return Float.compare(deltas[j], pivotWeight); - } - }.sort(0, deltas.length); - - return indices; - } - - /** - * Checks if target node is ineligible and if so, adds to the list - * of ineligible target nodes - */ - private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { - Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); - if (nodeLevelAllocationDecision.type() != Decision.Type.YES) { - inEligibleTargetNode.add(targetNode); - } - } - - /** - * Move started shards that can not be allocated to a node anymore - * - * For each shard to be moved this function executes a move operation - * to the minimal eligible node with respect to the - * weight function. If a shard is moved the shard will be set to - * {@link ShardRoutingState#RELOCATING} and a shadow instance of this - * shard is created with an incremented version in the state - * {@link ShardRoutingState#INITIALIZING}. - */ - public void moveShards() { - // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling - // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are - // offloading the shards. - - // Trying to eliminate target nodes so that we donot unnecessarily iterate over source nodes - // when no target is eligible - for (ModelNode currentNode : sorter.modelNodes) { - checkAndAddInEligibleTargetNode(currentNode.getRoutingNode()); - } - boolean primariesThrottled = false; - for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(movePrimaryFirst); it.hasNext();) { - // Verify if the cluster concurrent recoveries have been reached. - if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) { - logger.info( - "Cannot move any shard in the cluster due to cluster concurrent recoveries getting breached" - + ". Skipping shard iteration" - ); - return; - } - // Early terminate node interleaved shard iteration when no eligible target nodes are available - if (sorter.modelNodes.length == inEligibleTargetNode.size()) { - logger.info( - "Cannot move any shard in the cluster as there is no node on which shards can be allocated" - + ". Skipping shard iteration" - ); - return; - } - - ShardRouting shardRouting = it.next(); - - // Ensure that replicas don't relocate if primaries are being throttled and primary first is enabled - if (movePrimaryFirst && primariesThrottled && !shardRouting.primary()) { - logger.info( - "Cannot move any replica shard in the cluster as movePrimaryFirst is enabled and primary shards" - + "are being throttled. 
Skipping shard iteration" - ); - return; - } - - // Verify if the shard is allowed to move if outgoing recovery on the node hosting the primary shard - // is not being throttled. - Decision canMoveAwayDecision = allocation.deciders().canMoveAway(shardRouting, allocation); - if (canMoveAwayDecision.type() != Decision.Type.YES) { - if (logger.isDebugEnabled()) logger.debug("Cannot move away shard [{}] Skipping this shard", shardRouting); - if (shardRouting.primary() && canMoveAwayDecision.type() == Type.THROTTLE) { - primariesThrottled = true; - } - continue; - } - - final MoveDecision moveDecision = decideMove(shardRouting); - if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) { - final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); - sourceNode.removeShard(shardRouting); - Tuple relocatingShards = routingNodes.relocateShard( - shardRouting, - targetNode.getNodeId(), - allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), - allocation.changes() - ); - targetNode.addShard(relocatingShards.v2()); - if (logger.isTraceEnabled()) { - logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); - } - - // Verifying if this node can be considered ineligible for further iterations - if (targetNode != null) { - checkAndAddInEligibleTargetNode(targetNode.getRoutingNode()); - } - } else if (moveDecision.isDecisionTaken() && moveDecision.canRemain() == false) { - logger.trace("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); - } - } - } - - /** - * Makes a decision on whether to move a started shard to another node. The following rules apply - * to the {@link MoveDecision} return object: - * 1. If the shard is not started, no decision will be taken and {@link MoveDecision#isDecisionTaken()} will return false. - * 2. If the shard is allowed to remain on its current node, no attempt will be made to move the shard and - * {@link MoveDecision#getCanRemainDecision} will have a decision type of YES. All other fields in the object will be null. - * 3. If the shard is not allowed to remain on its current node, then {@link MoveDecision#getAllocationDecision()} will be - * populated with the decision of moving to another node. If {@link MoveDecision#forceMove()} ()} returns {@code true}, then - * {@link MoveDecision#getTargetNode} will return a non-null value, otherwise the assignedNodeId will be null. - * 4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then - * {@link MoveDecision#getNodeDecisions} will have a non-null value. - */ - public MoveDecision decideMove(final ShardRouting shardRouting) { - if (shardRouting.started() == false) { - // we can only move started shards - return MoveDecision.NOT_TAKEN; - } - - final boolean explain = allocation.debugDecision(); - final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - assert sourceNode != null && sourceNode.containsShard(shardRouting); - RoutingNode routingNode = sourceNode.getRoutingNode(); - Decision canRemain = allocation.deciders().canRemain(shardRouting, routingNode, allocation); - if (canRemain.type() != Decision.Type.NO) { - return MoveDecision.stay(canRemain); - } - - sorter.reset(shardRouting.getIndexName()); - /* - * the sorter holds the minimum weight node first for the shards index. - * We now walk through the nodes until we find a node to allocate the shard. 
- * This is not guaranteed to be balanced after this operation we still try best effort to - * allocate on the minimal eligible node. - */ - Type bestDecision = Type.NO; - RoutingNode targetNode = null; - final List nodeExplanationMap = explain ? new ArrayList<>() : null; - int weightRanking = 0; - int targetNodeProcessed = 0; - for (ModelNode currentNode : sorter.modelNodes) { - if (currentNode != sourceNode) { - RoutingNode target = currentNode.getRoutingNode(); - if (!explain && inEligibleTargetNode.contains(target)) continue; - // don't use canRebalance as we want hard filtering rules to apply. See #17698 - if (!explain) { - // If we cannot allocate any shard to node marking it in eligible - Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(target, allocation); - if (nodeLevelAllocationDecision.type() != Decision.Type.YES) { - inEligibleTargetNode.add(currentNode.getRoutingNode()); - continue; - } - } - targetNodeProcessed++; - // don't use canRebalance as we want hard filtering rules to apply. See #17698 - Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); - if (explain) { - nodeExplanationMap.add( - new NodeAllocationResult(currentNode.getRoutingNode().node(), allocationDecision, ++weightRanking) - ); - } - // TODO maybe we can respect throttling here too? - if (allocationDecision.type().higherThan(bestDecision)) { - bestDecision = allocationDecision.type(); - if (bestDecision == Type.YES) { - targetNode = target; - if (explain == false) { - // we are not in explain mode and already have a YES decision on the best weighted node, - // no need to continue iterating - break; - } - } - } - } - } - - return MoveDecision.cannotRemain( - canRemain, - AllocationDecision.fromDecisionType(bestDecision), - targetNode != null ? targetNode.node() : null, - nodeExplanationMap - ); - } - - /** - * Builds the internal model from all shards in the given - * {@link Iterable}. All shards in the {@link Iterable} must be assigned - * to a node. This method will skip shards in the state - * {@link ShardRoutingState#RELOCATING} since each relocating shard has - * a shadow shard in the state {@link ShardRoutingState#INITIALIZING} - * on the target node which we respect during the allocation / balancing - * process. In short, this method recreates the status-quo in the cluster. - */ - private Map buildModelFromAssigned() { - Map nodes = new HashMap<>(); - for (RoutingNode rn : routingNodes) { - ModelNode node = new ModelNode(rn); - nodes.put(rn.nodeId(), node); - for (ShardRouting shard : rn) { - assert rn.nodeId().equals(shard.currentNodeId()); - /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ - if (shard.state() != RELOCATING) { - node.addShard(shard); - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); - } - } - } - } - return nodes; - } - - /** - * Allocates all given shards on the minimal eligible node for the shards index - * with respect to the weight function. All given shards must be unassigned. - */ - private void allocateUnassigned() { - RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - assert !nodes.isEmpty(); - if (logger.isTraceEnabled()) { - logger.trace("Start allocating unassigned shards"); - } - if (unassigned.isEmpty()) { - return; - } - - /* - * TODO: We could be smarter here and group the shards by index and then - * use the sorter to save some iterations. 
- */ - final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation); - final Comparator comparator = (o1, o2) -> { - if (o1.primary() ^ o2.primary()) { - return o1.primary() ? -1 : 1; - } - final int indexCmp; - if ((indexCmp = o1.getIndexName().compareTo(o2.getIndexName())) == 0) { - return o1.getId() - o2.getId(); - } - // this comparator is more expensive than all the others up there - // that's why it's added last even though it could be easier to read - // if we'd apply it earlier. this comparator will only differentiate across - // indices all shards of the same index is treated equally. - final int secondary = secondaryComparator.compare(o1, o2); - return secondary == 0 ? indexCmp : secondary; - }; - /* - * we use 2 arrays and move replicas to the second array once we allocated an identical - * replica in the current iteration to make sure all indices get allocated in the same manner. - * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with - * 2 replica and 1 shard would look like: - * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] - * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with - * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. - */ - ShardRouting[] primary = unassigned.drain(); - ShardRouting[] secondary = new ShardRouting[primary.length]; - int secondaryLength = 0; - int primaryLength = primary.length; - ArrayUtil.timSort(primary, comparator); - do { - for (int i = 0; i < primaryLength; i++) { - ShardRouting shard = primary[i]; - final AllocateUnassignedDecision allocationDecision = decideAllocateUnassigned(shard); - final String assignedNodeId = allocationDecision.getTargetNode() != null - ? allocationDecision.getTargetNode().getId() - : null; - final ModelNode minNode = assignedNodeId != null ? 
nodes.get(assignedNodeId) : null; - - if (allocationDecision.getAllocationDecision() == AllocationDecision.YES) { - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); - } - - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); - minNode.addShard(shard); - if (!shard.primary()) { - // copy over the same replica shards to the secondary array so they will get allocated - // in a subsequent iteration, allowing replicas of other shards to be allocated first - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - secondary[secondaryLength++] = primary[++i]; - } - } - } else { - // did *not* receive a YES decision - if (logger.isTraceEnabled()) { - logger.trace( - "No eligible node found to assign shard [{}] allocation_status [{}]", - shard, - allocationDecision.getAllocationStatus() - ); - } - - if (minNode != null) { - // throttle decision scenario - assert allocationDecision.getAllocationStatus() == AllocationStatus.DECIDERS_THROTTLED; - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); - } else { - if (logger.isTraceEnabled()) { - logger.trace("No Node found to assign shard [{}]", shard); - } - } - - unassigned.ignoreShard(shard, allocationDecision.getAllocationStatus(), allocation.changes()); - if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationDecision.getAllocationStatus(), allocation.changes()); - } - } - } - } - primaryLength = secondaryLength; - ShardRouting[] tmp = primary; - primary = secondary; - secondary = tmp; - secondaryLength = 0; - } while (primaryLength > 0); - // clear everything we have either added it or moved to ignoreUnassigned - } - - /** - * Make a decision for allocating an unassigned shard. This method returns a two values in a tuple: the - * first value is the {@link Decision} taken to allocate the unassigned shard, the second value is the - * {@link ModelNode} representing the node that the shard should be assigned to. If the decision returned - * is of type {@link Type#NO}, then the assigned node will be null. 
- */ - private AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { - if (shard.assignedToNode()) { - // we only make decisions for unassigned shards here - return AllocateUnassignedDecision.NOT_TAKEN; - } - - final boolean explain = allocation.debugDecision(); - Decision shardLevelDecision = allocation.deciders().canAllocate(shard, allocation); - if (shardLevelDecision.type() == Type.NO && explain == false) { - // NO decision for allocating the shard, irrespective of any particular node, so exit early - return AllocateUnassignedDecision.no(AllocationStatus.DECIDERS_NO, null); - } - - /* find an node with minimal weight we can allocate on*/ - float minWeight = Float.POSITIVE_INFINITY; - ModelNode minNode = null; - Decision decision = null; - /* Don't iterate over an identity hashset here the - * iteration order is different for each run and makes testing hard */ - Map nodeExplanationMap = explain ? new HashMap<>() : null; - List> nodeWeights = explain ? new ArrayList<>() : null; - for (ModelNode node : nodes.values()) { - if (node.containsShard(shard) && explain == false) { - // decision is NO without needing to check anything further, so short circuit - continue; - } - - // weight of this index currently on the node - float currentWeight = weight.weightWithAllocationConstraints(this, node, shard.getIndexName()); - // moving the shard would not improve the balance, and we are not in explain mode, so short circuit - if (currentWeight > minWeight && explain == false) { - continue; - } - - Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation); - if (explain) { - nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0)); - nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight)); - } - if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { - final boolean updateMinNode; - if (currentWeight == minWeight) { - /* we have an equal weight tie breaking: - * 1. if one decision is YES prefer it - * 2. prefer the node that holds the primary for this index with the next id in the ring ie. - * for the 3 shards 2 replica case we try to build up: - * 1 2 0 - * 2 0 1 - * 0 1 2 - * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater - * than the id of the shard we need to assign. This works find when new indices are created since - * primaries are added first and we only add one shard set a time in this algorithm. 
- */ - if (currentDecision.type() == decision.type()) { - final int repId = shard.id(); - final int nodeHigh = node.highestPrimary(shard.index().getName()); - final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); - updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) - && (nodeHigh < minNodeHigh)) || (nodeHigh > repId && minNodeHigh < repId)); - } else { - updateMinNode = currentDecision.type() == Type.YES; - } - } else { - updateMinNode = currentWeight < minWeight; - } - if (updateMinNode) { - minNode = node; - minWeight = currentWeight; - decision = currentDecision; - } - } - } - if (decision == null) { - // decision was not set and a node was not assigned, so treat it as a NO decision - decision = Decision.NO; - } - List nodeDecisions = null; - if (explain) { - nodeDecisions = new ArrayList<>(); - // fill in the correct weight ranking, once we've been through all nodes - nodeWeights.sort((nodeWeight1, nodeWeight2) -> Float.compare(nodeWeight1.v2(), nodeWeight2.v2())); - int weightRanking = 0; - for (Tuple nodeWeight : nodeWeights) { - NodeAllocationResult current = nodeExplanationMap.get(nodeWeight.v1()); - nodeDecisions.add(new NodeAllocationResult(current.getNode(), current.getCanAllocateDecision(), ++weightRanking)); - } - } - return AllocateUnassignedDecision.fromDecision(decision, minNode != null ? minNode.routingNode.node() : null, nodeDecisions); - } - - private static final Comparator BY_DESCENDING_SHARD_ID = Comparator.comparing(ShardRouting::shardId).reversed(); - - /** - * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the - * balance model. Iff this method returns a true the relocation has already been executed on the - * simulation model as well as on the cluster. 
- */ - private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String idx) { - final ModelIndex index = maxNode.getIndex(idx); - if (index != null) { - logger.trace("Try relocating shard of [{}] from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); - final Iterable shardRoutings = StreamSupport.stream(index.spliterator(), false) - .filter(ShardRouting::started) // cannot rebalance unassigned, initializing or relocating shards anyway - .filter(maxNode::containsShard) - .sorted(BY_DESCENDING_SHARD_ID) // check in descending order of shard id so that the decision is deterministic - ::iterator; - - final AllocationDeciders deciders = allocation.deciders(); - for (ShardRouting shard : shardRoutings) { - final Decision rebalanceDecision = deciders.canRebalance(shard, allocation); - if (rebalanceDecision.type() == Type.NO) { - continue; - } - final Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation); - if (allocationDecision.type() == Type.NO) { - continue; - } - - final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); - - maxNode.removeShard(shard); - long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - - if (decision.type() == Type.YES) { - /* only allocate on the cluster if we are not throttled */ - logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); - minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); - return true; - } else { - /* allocate on the model even if throttled */ - logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); - assert decision.type() == Type.THROTTLE; - minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); - return false; - } - } - } - logger.trace("No shards of [{}] can relocate from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); - return false; - } - - } - /** * A model node. * @@ -1277,6 +353,25 @@ public boolean containsShard(ShardRouting shard) { } + /** + * A {@link Balancer} used by the {@link BalancedShardsAllocator} to perform allocation operations + * @deprecated As of 2.4.0, replaced by {@link LocalShardsBalancer} + * + * @opensearch.internal + */ + @Deprecated + public static class Balancer extends LocalShardsBalancer { + public Balancer( + Logger logger, + RoutingAllocation allocation, + boolean movePrimaryFirst, + BalancedShardsAllocator.WeightFunction weight, + float threshold + ) { + super(logger, allocation, movePrimaryFirst, weight, threshold); + } + } + /** * A model index. 
* @@ -1346,10 +441,10 @@ static final class NodeSorter extends IntroSorter { final float[] weights; private final WeightFunction function; private String index; - private final Balancer balancer; + private final ShardsBalancer balancer; private float pivotWeight; - NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) { + NodeSorter(ModelNode[] modelNodes, WeightFunction function, ShardsBalancer balancer) { this.function = function; this.balancer = balancer; this.modelNodes = modelNodes; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java new file mode 100644 index 0000000000000..53d7c827392d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -0,0 +1,967 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.allocator; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.IntroSorter; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.AllocationDecision; +import org.opensearch.cluster.routing.allocation.MoveDecision; +import org.opensearch.cluster.routing.allocation.NodeAllocationResult; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; +import org.opensearch.common.collect.Tuple; +import org.opensearch.gateway.PriorityComparator; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.StreamSupport; + +import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; + +/** + * A {@link LocalShardsBalancer} used by the {@link BalancedShardsAllocator} to perform allocation operations + * for local shards within the cluster. 
+ * + * @opensearch.internal + */ +public class LocalShardsBalancer extends ShardsBalancer { + private final Logger logger; + private final Map nodes; + private final RoutingAllocation allocation; + private final RoutingNodes routingNodes; + private final boolean movePrimaryFirst; + private final BalancedShardsAllocator.WeightFunction weight; + + private final float threshold; + private final Metadata metadata; + private final float avgShardsPerNode; + private final BalancedShardsAllocator.NodeSorter sorter; + private final Set inEligibleTargetNode; + + public LocalShardsBalancer( + Logger logger, + RoutingAllocation allocation, + boolean movePrimaryFirst, + BalancedShardsAllocator.WeightFunction weight, + float threshold + ) { + this.logger = logger; + this.allocation = allocation; + this.movePrimaryFirst = movePrimaryFirst; + this.weight = weight; + this.threshold = threshold; + this.routingNodes = allocation.routingNodes(); + this.metadata = allocation.metadata(); + avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); + nodes = Collections.unmodifiableMap(buildModelFromAssigned()); + sorter = newNodeSorter(); + inEligibleTargetNode = new HashSet<>(); + } + + /** + * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list. + */ + private BalancedShardsAllocator.ModelNode[] nodesArray() { + return nodes.values().toArray(new BalancedShardsAllocator.ModelNode[nodes.size()]); + } + + /** + * Returns the average of shards per node for the given index + */ + @Override + public float avgShardsPerNode(String index) { + return ((float) metadata.index(index).getTotalNumberOfShards()) / nodes.size(); + } + + /** + * Returns the global average of shards per node + */ + @Override + public float avgShardsPerNode() { + return avgShardsPerNode; + } + + /** + * Returns a new {@link BalancedShardsAllocator.NodeSorter} that sorts the nodes based on their + * current weight with respect to the index passed to the sorter. The + * returned sorter is not sorted. Use {@link BalancedShardsAllocator.NodeSorter#reset(String)} + * to sort based on an index. + */ + private BalancedShardsAllocator.NodeSorter newNodeSorter() { + return new BalancedShardsAllocator.NodeSorter(nodesArray(), weight, this); + } + + /** + * The absolute value difference between two weights. + */ + private static float absDelta(float lower, float higher) { + assert higher >= lower : higher + " lt " + lower + " but was expected to be gte"; + return Math.abs(higher - lower); + } + + /** + * Returns {@code true} iff the weight delta between two nodes is under a defined threshold. + * See {@link BalancedShardsAllocator#THRESHOLD_SETTING} for defining the threshold. + */ + private static boolean lessThan(float delta, float threshold) { + /* deltas close to the threshold are "rounded" to the threshold manually + to prevent floating point problems if the delta is very close to the + threshold ie. 1.000000002 which can trigger unnecessary balance actions*/ + return delta <= (threshold + 0.001f); + } + + /** + * Balances the nodes on the cluster model according to the weight function. 
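The epsilon inside lessThan above is easy to gloss over; a minimal, standalone sketch (class name hypothetical, not part of this patch) of why the rounding matters when float error pushes a delta just past the threshold:

    // Standalone demo of the epsilon guard in lessThan(delta, threshold); name hypothetical.
    public final class EpsilonDemo {
        static boolean lessThan(float delta, float threshold) {
            // deltas within 0.001 of the threshold are treated as balanced, as in the patch
            return delta <= (threshold + 0.001f);
        }

        public static void main(String[] args) {
            float threshold = 1.0f;   // default BalancedShardsAllocator threshold
            float delta = 1.0000002f; // accumulated float noise on a delta that is "really" 1.0
            System.out.println(delta <= threshold);         // false: a raw comparison would relocate
            System.out.println(lessThan(delta, threshold)); // true: the epsilon absorbs the noise
        }
    }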
+ * The actual balancing is delegated to {@link #balanceByWeights()} + */ + @Override + void balance() { + if (logger.isTraceEnabled()) { + logger.trace("Start balancing cluster"); + } + if (allocation.hasPendingAsyncFetch()) { + /* + * see https://github.com/elastic/elasticsearch/issues/14387 + * if we allow rebalance operations while we are still fetching shard store data + * we might end up with unnecessary rebalance operations which can be super confusion/frustrating + * since once the fetches come back we might just move all the shards back again. + * Therefore we only do a rebalance if we have fetched all information. + */ + logger.debug("skipping rebalance due to in-flight shard/store fetches"); + return; + } + if (allocation.deciders().canRebalance(allocation).type() != Decision.Type.YES) { + logger.trace("skipping rebalance as it is disabled"); + return; + } + if (nodes.size() < 2) { /* skip if we only have one node */ + logger.trace("skipping rebalance as single node only"); + return; + } + balanceByWeights(); + } + + /** + * Makes a decision about moving a single shard to a different node to form a more + * optimally balanced cluster. This method is invoked from the cluster allocation + * explain API only. + */ + @Override + MoveDecision decideRebalance(final ShardRouting shard) { + if (shard.started() == false) { + // we can only rebalance started shards + return MoveDecision.NOT_TAKEN; + } + + Decision canRebalance = allocation.deciders().canRebalance(shard, allocation); + + sorter.reset(shard.getIndexName()); + BalancedShardsAllocator.ModelNode[] modelNodes = sorter.modelNodes; + final String currentNodeId = shard.currentNodeId(); + // find currently assigned node + BalancedShardsAllocator.ModelNode currentNode = null; + for (BalancedShardsAllocator.ModelNode node : modelNodes) { + if (node.getNodeId().equals(currentNodeId)) { + currentNode = node; + break; + } + } + assert currentNode != null : "currently assigned node could not be found"; + + // balance the shard, if a better node can be found + final String idxName = shard.getIndexName(); + final float currentWeight = weight.weight(this, currentNode, idxName); + final AllocationDeciders deciders = allocation.deciders(); + Decision.Type rebalanceDecisionType = Decision.Type.NO; + BalancedShardsAllocator.ModelNode assignedNode = null; + List> betterBalanceNodes = new ArrayList<>(); + List> sameBalanceNodes = new ArrayList<>(); + List> worseBalanceNodes = new ArrayList<>(); + for (BalancedShardsAllocator.ModelNode node : modelNodes) { + if (node == currentNode) { + continue; // skip over node we're currently allocated to + } + final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation); + // the current weight of the node in the cluster, as computed by the weight function; + // this is a comparison of the number of shards on this node to the number of shards + // that should be on each node on average (both taking the cluster as a whole into account + // as well as shards per index) + final float nodeWeight = weight.weight(this, node, idxName); + // if the node we are examining has a worse (higher) weight than the node the shard is + // assigned to, then there is no way moving the shard to the node with the worse weight + // can make the balance of the cluster better, so we check for that here + final boolean betterWeightThanCurrent = nodeWeight <= currentWeight; + boolean rebalanceConditionsMet = false; + if (betterWeightThanCurrent) { + // get the delta between the weights of the node 
we are checking and the node that holds the shard + float currentDelta = absDelta(nodeWeight, currentWeight); + // checks if the weight delta is above a certain threshold; if it is not above a certain threshold, + // then even though the node we are examining has a better weight and may make the cluster balance + // more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless + // the gains make it worth it, as defined by the threshold + boolean deltaAboveThreshold = lessThan(currentDelta, threshold) == false; + // calculate the delta of the weights of the two nodes if we were to add the shard to the + // node in question and move it away from the node that currently holds it. + // hence we add 2.0f to the weight delta + float proposedDelta = 2.0f + nodeWeight - currentWeight; + boolean betterWeightWithShardAdded = proposedDelta < currentDelta; + + rebalanceConditionsMet = deltaAboveThreshold && betterWeightWithShardAdded; + // if the simulated weight delta with the shard moved away is better than the weight delta + // with the shard remaining on the current node, and we are allowed to allocate to the + // node in question, then allow the rebalance + if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) { + // rebalance to the node, only will get overwritten if the decision here is to + // THROTTLE and we get a decision with YES on another node + rebalanceDecisionType = canAllocate.type(); + assignedNode = node; + } + } + Tuple nodeResult = Tuple.tuple(node, canAllocate); + if (rebalanceConditionsMet) { + betterBalanceNodes.add(nodeResult); + } else if (betterWeightThanCurrent) { + sameBalanceNodes.add(nodeResult); + } else { + worseBalanceNodes.add(nodeResult); + } + } + + int weightRanking = 0; + List nodeDecisions = new ArrayList<>(modelNodes.length - 1); + for (Tuple result : betterBalanceNodes) { + nodeDecisions.add( + new NodeAllocationResult( + result.v1().getRoutingNode().node(), + AllocationDecision.fromDecisionType(result.v2().type()), + result.v2(), + ++weightRanking + ) + ); + } + int currentNodeWeightRanking = ++weightRanking; + for (Tuple result : sameBalanceNodes) { + AllocationDecision nodeDecision = result.v2().type() == Decision.Type.NO + ? AllocationDecision.NO + : AllocationDecision.WORSE_BALANCE; + nodeDecisions.add( + new NodeAllocationResult(result.v1().getRoutingNode().node(), nodeDecision, result.v2(), currentNodeWeightRanking) + ); + } + for (Tuple result : worseBalanceNodes) { + AllocationDecision nodeDecision = result.v2().type() == Decision.Type.NO + ? AllocationDecision.NO + : AllocationDecision.WORSE_BALANCE; + nodeDecisions.add(new NodeAllocationResult(result.v1().getRoutingNode().node(), nodeDecision, result.v2(), ++weightRanking)); + } + + if (canRebalance.type() != Decision.Type.YES || allocation.hasPendingAsyncFetch()) { + AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() + ? AllocationDecision.AWAITING_INFO + : AllocationDecision.fromDecisionType(canRebalance.type()); + return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions); + } else { + return MoveDecision.rebalance( + canRebalance, + AllocationDecision.fromDecisionType(rebalanceDecisionType), + assignedNode != null ? assignedNode.getRoutingNode().node() : null, + currentNodeWeightRanking, + nodeDecisions + ); + } + } + + /** + * Balances the nodes on the cluster model according to the weight + * function. 
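A small worked example of the arithmetic in decideRebalance above, under the assumption (which holds for the default weight function, where one shard contributes roughly 1.0 to a node's weight) that moving one shard lowers the source weight by about one and raises the target by about one; all numbers are hypothetical:

    // Worked example of the rebalance test in decideRebalance; values hypothetical.
    public final class RebalanceMathDemo {
        public static void main(String[] args) {
            float threshold = 1.0f;     // default threshold
            float currentWeight = 3.0f; // node currently holding the shard
            float nodeWeight = 1.0f;    // candidate node

            float currentDelta = Math.abs(currentWeight - nodeWeight);           // 2.0
            boolean deltaAboveThreshold = !(currentDelta <= threshold + 0.001f); // true

            // Moving one shard lowers the source by ~1 and raises the target by ~1, so the
            // post-move difference is (nodeWeight + 1) - (currentWeight - 1),
            // i.e. 2.0f + nodeWeight - currentWeight.
            float proposedDelta = 2.0f + nodeWeight - currentWeight;             // 0.0
            boolean betterWeightWithShardAdded = proposedDelta < currentDelta;   // true

            System.out.println(deltaAboveThreshold && betterWeightWithShardAdded); // true -> rebalance
        }
    }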
The configured threshold is the minimum delta between the + * weight of the maximum node and the minimum node according to the + * {@link BalancedShardsAllocator.WeightFunction}. This weight is calculated per index to + * distribute shards evenly per index. The balancer tries to relocate + * shards only if the delta exceeds the threshold. In the default case + * the threshold is set to {@code 1.0} to enforce gaining relocation + * only, or in other words relocations that move the weight delta closer + * to {@code 0.0} + */ + private void balanceByWeights() { + final AllocationDeciders deciders = allocation.deciders(); + final BalancedShardsAllocator.ModelNode[] modelNodes = sorter.modelNodes; + final float[] weights = sorter.weights; + for (String index : buildWeightOrderedIndices()) { + IndexMetadata indexMetadata = metadata.index(index); + + // find nodes that have a shard of this index or where shards of this index are allowed to be allocated to, + // move these nodes to the front of modelNodes so that we can only balance based on these nodes + int relevantNodes = 0; + for (int i = 0; i < modelNodes.length; i++) { + BalancedShardsAllocator.ModelNode modelNode = modelNodes[i]; + if (modelNode.getIndex(index) != null + || deciders.canAllocate(indexMetadata, modelNode.getRoutingNode(), allocation).type() != Decision.Type.NO) { + // swap nodes at position i and relevantNodes + modelNodes[i] = modelNodes[relevantNodes]; + modelNodes[relevantNodes] = modelNode; + relevantNodes++; + } + } + + if (relevantNodes < 2) { + continue; + } + + sorter.reset(index, 0, relevantNodes); + int lowIdx = 0; + int highIdx = relevantNodes - 1; + while (true) { + final BalancedShardsAllocator.ModelNode minNode = modelNodes[lowIdx]; + final BalancedShardsAllocator.ModelNode maxNode = modelNodes[highIdx]; + advance_range: if (maxNode.numShards(index) > 0) { + final float delta = absDelta(weights[lowIdx], weights[highIdx]); + if (lessThan(delta, threshold)) { + if (lowIdx > 0 + && highIdx - 1 > 0 // is there a chance for a higher delta? + && (absDelta(weights[0], weights[highIdx - 1]) > threshold) // check if we need to break at all + ) { + /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible + * due to some allocation decider restrictions like zone awareness. if one zone has for instance + * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we + * can't move to the "lighter" shards since otherwise the zone would go over capacity. + * + * This break jumps straight to the condition below were we start moving from the high index towards + * the low index to shrink the window we are considering for balance from the other direction. + * (check shrinking the window from MAX to MIN) + * See #3580 + */ + break advance_range; + } + if (logger.isTraceEnabled()) { + logger.trace( + "Stop balancing index [{}] min_node [{}] weight: [{}]" + " max_node [{}] weight: [{}] delta: [{}]", + index, + maxNode.getNodeId(), + weights[highIdx], + minNode.getNodeId(), + weights[lowIdx], + delta + ); + } + break; + } + if (logger.isTraceEnabled()) { + logger.trace( + "Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", + maxNode.getNodeId(), + weights[highIdx], + minNode.getNodeId(), + weights[lowIdx], + delta + ); + } + if (delta <= 1.0f) { + /* + * prevent relocations that only swap the weights of the two nodes. 
a relocation must bring us closer to the + * balance if we only achieve the same delta the relocation is useless + * + * NB this comment above was preserved from an earlier version but doesn't obviously describe the code today. We + * already know that lessThan(delta, threshold) == false and threshold defaults to 1.0, so by default we never + * hit this case anyway. + */ + logger.trace( + "Couldn't find shard to relocate from node [{}] to node [{}]", + maxNode.getNodeId(), + minNode.getNodeId() + ); + } else if (tryRelocateShard(minNode, maxNode, index)) { + /* + * TODO we could be a bit smarter here, we don't need to fully sort necessarily + * we could just find the place to insert linearly but the win might be minor + * compared to the added complexity + */ + weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); + weights[highIdx] = sorter.weight(modelNodes[highIdx]); + sorter.sort(0, relevantNodes); + lowIdx = 0; + highIdx = relevantNodes - 1; + continue; + } + } + if (lowIdx < highIdx - 1) { + /* Shrinking the window from MIN to MAX + * we can't move from any shard from the min node lets move on to the next node + * and see if the threshold still holds. We either don't have any shard of this + * index on this node of allocation deciders prevent any relocation.*/ + lowIdx++; + } else if (lowIdx > 0) { + /* Shrinking the window from MAX to MIN + * now we go max to min since obviously we can't move anything to the max node + * lets pick the next highest */ + lowIdx = 0; + highIdx--; + } else { + /* we are done here, we either can't relocate anymore or we are balanced */ + break; + } + } + } + } + + /** + * This builds a initial index ordering where the indices are returned + * in most unbalanced first. We need this in order to prevent over + * allocations on added nodes from one index when the weight parameters + * for global balance overrule the index balance at an intermediate + * state. For example this can happen if we have 3 nodes and 3 indices + * with 3 primary and 1 replica shards. At the first stage all three nodes hold + * 2 shard for each index. Now we add another node and the first index + * is balanced moving three shards from two of the nodes over to the new node since it + * has no shards yet and global balance for the node is way below + * average. To re-balance we need to move shards back eventually likely + * to the nodes we relocated them from. 
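The method below, buildWeightOrderedIndices, realizes this ordering with a Lucene IntroSorter over parallel arrays; a condensed, standalone sketch of the same most-unbalanced-first idea using plain JDK sorting (index names and deltas hypothetical):

    import java.util.Arrays;
    import java.util.Comparator;

    // Minimal sketch of the "most unbalanced index first" ordering; data hypothetical.
    public final class WeightOrderedIndicesDemo {
        public static void main(String[] args) {
            String[] indices = { "logs", "metrics", "traces" };
            float[] deltas = { 0.5f, 2.0f, 1.0f }; // per-index weight delta, as from sorter.delta()

            Integer[] order = { 0, 1, 2 };
            Arrays.sort(order, Comparator.comparingDouble(i -> -deltas[i])); // descending delta

            for (int i : order) {
                System.out.println(indices[i] + " delta=" + deltas[i]);
            }
            // metrics (2.0) is balanced first, then traces (1.0), then logs (0.5).
        }
    }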
+     */
+    private String[] buildWeightOrderedIndices() {
+        final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class);
+        final float[] deltas = new float[indices.length];
+        for (int i = 0; i < deltas.length; i++) {
+            sorter.reset(indices[i]);
+            deltas[i] = sorter.delta();
+        }
+        new IntroSorter() {
+
+            float pivotWeight;
+
+            @Override
+            protected void swap(int i, int j) {
+                final String tmpIdx = indices[i];
+                indices[i] = indices[j];
+                indices[j] = tmpIdx;
+                final float tmpDelta = deltas[i];
+                deltas[i] = deltas[j];
+                deltas[j] = tmpDelta;
+            }
+
+            @Override
+            protected int compare(int i, int j) {
+                return Float.compare(deltas[j], deltas[i]);
+            }
+
+            @Override
+            protected void setPivot(int i) {
+                pivotWeight = deltas[i];
+            }
+
+            @Override
+            protected int comparePivot(int j) {
+                return Float.compare(deltas[j], pivotWeight);
+            }
+        }.sort(0, deltas.length);
+
+        return indices;
+    }
+
+    /**
+     * Checks if the target node is ineligible and, if so, adds it to the list
+     * of ineligible target nodes.
+     */
+    private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) {
+        Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation);
+        if (nodeLevelAllocationDecision.type() != Decision.Type.YES) {
+            inEligibleTargetNode.add(targetNode);
+        }
+    }
+
+    /**
+     * Moves started shards that cannot remain on their current node.
+     *
+     * For each shard to be moved this function executes a move operation
+     * to the minimal eligible node with respect to the
+     * weight function. If a shard is moved, the shard will be set to
+     * {@link ShardRoutingState#RELOCATING} and a shadow instance of this
+     * shard is created with an incremented version in the state
+     * {@link ShardRoutingState#INITIALIZING}.
+     */
+    @Override
+    void moveShards() {
+        // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
+        // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
+        // offloading the shards.
+
+        // Trying to eliminate target nodes so that we do not unnecessarily iterate over source nodes
+        // when no target is eligible
+        for (BalancedShardsAllocator.ModelNode currentNode : sorter.modelNodes) {
+            checkAndAddInEligibleTargetNode(currentNode.getRoutingNode());
+        }
+        boolean primariesThrottled = false;
+        for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(movePrimaryFirst); it.hasNext();) {
+            // Verify if the cluster concurrent recoveries have been reached.
+            if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) {
+                logger.info(
+                    "Cannot move any shard in the cluster due to cluster concurrent recoveries getting breached"
+                        + ". Skipping shard iteration"
+                );
+                return;
+            }
+            // Early terminate node interleaved shard iteration when no eligible target nodes are available
+            if (sorter.modelNodes.length == inEligibleTargetNode.size()) {
+                logger.info(
+                    "Cannot move any shard in the cluster as there is no node on which shards can be allocated"
+                        + ". Skipping shard iteration"
+                );
+                return;
+            }
+
+            ShardRouting shardRouting = it.next();
+
+            // Ensure that replicas don't relocate if primaries are being throttled and primary first is enabled
+            if (movePrimaryFirst && primariesThrottled && !shardRouting.primary()) {
+                logger.info(
+                    "Cannot move any replica shard in the cluster as movePrimaryFirst is enabled and primary shards "
+                        + "are being throttled.
Skipping shard iteration" + ); + return; + } + + // Verify if the shard is allowed to move if outgoing recovery on the node hosting the primary shard + // is not being throttled. + Decision canMoveAwayDecision = allocation.deciders().canMoveAway(shardRouting, allocation); + if (canMoveAwayDecision.type() != Decision.Type.YES) { + if (logger.isDebugEnabled()) logger.debug("Cannot move away shard [{}] Skipping this shard", shardRouting); + if (shardRouting.primary() && canMoveAwayDecision.type() == Decision.Type.THROTTLE) { + primariesThrottled = true; + } + continue; + } + + final MoveDecision moveDecision = decideMove(shardRouting); + if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) { + final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + final BalancedShardsAllocator.ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); + sourceNode.removeShard(shardRouting); + Tuple relocatingShards = routingNodes.relocateShard( + shardRouting, + targetNode.getNodeId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), + allocation.changes() + ); + targetNode.addShard(relocatingShards.v2()); + if (logger.isTraceEnabled()) { + logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); + } + + // Verifying if this node can be considered ineligible for further iterations + if (targetNode != null) { + checkAndAddInEligibleTargetNode(targetNode.getRoutingNode()); + } + } else if (moveDecision.isDecisionTaken() && moveDecision.canRemain() == false) { + logger.trace("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + } + } + } + + /** + * Makes a decision on whether to move a started shard to another node. The following rules apply + * to the {@link MoveDecision} return object: + * 1. If the shard is not started, no decision will be taken and {@link MoveDecision#isDecisionTaken()} will return false. + * 2. If the shard is allowed to remain on its current node, no attempt will be made to move the shard and + * {@link MoveDecision#getCanRemainDecision} will have a decision type of YES. All other fields in the object will be null. + * 3. If the shard is not allowed to remain on its current node, then {@link MoveDecision#getAllocationDecision()} will be + * populated with the decision of moving to another node. If {@link MoveDecision#forceMove()} ()} returns {@code true}, then + * {@link MoveDecision#getTargetNode} will return a non-null value, otherwise the assignedNodeId will be null. + * 4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then + * {@link MoveDecision#getNodeDecisions} will have a non-null value. + */ + @Override + MoveDecision decideMove(final ShardRouting shardRouting) { + if (shardRouting.started() == false) { + // we can only move started shards + return MoveDecision.NOT_TAKEN; + } + + final boolean explain = allocation.debugDecision(); + final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + assert sourceNode != null && sourceNode.containsShard(shardRouting); + RoutingNode routingNode = sourceNode.getRoutingNode(); + Decision canRemain = allocation.deciders().canRemain(shardRouting, routingNode, allocation); + if (canRemain.type() != Decision.Type.NO) { + return MoveDecision.stay(canRemain); + } + + sorter.reset(shardRouting.getIndexName()); + /* + * the sorter holds the minimum weight node first for the shards index. 
+         * We now walk through the nodes until we find a node to allocate the shard.
+         * This is not guaranteed to be balanced after this operation; we still make a
+         * best effort to allocate on the minimal eligible node.
+         */
+        Decision.Type bestDecision = Decision.Type.NO;
+        RoutingNode targetNode = null;
+        final List<NodeAllocationResult> nodeExplanationMap = explain ? new ArrayList<>() : null;
+        int weightRanking = 0;
+        int targetNodeProcessed = 0;
+        for (BalancedShardsAllocator.ModelNode currentNode : sorter.modelNodes) {
+            if (currentNode != sourceNode) {
+                RoutingNode target = currentNode.getRoutingNode();
+                if (!explain && inEligibleTargetNode.contains(target)) continue;
+                // don't use canRebalance as we want hard filtering rules to apply. See #17698
+                if (!explain) {
+                    // If we cannot allocate any shard to the node, mark it ineligible
+                    Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(target, allocation);
+                    if (nodeLevelAllocationDecision.type() != Decision.Type.YES) {
+                        inEligibleTargetNode.add(currentNode.getRoutingNode());
+                        continue;
+                    }
+                }
+                targetNodeProcessed++;
+                // don't use canRebalance as we want hard filtering rules to apply. See #17698
+                Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
+                if (explain) {
+                    nodeExplanationMap.add(
+                        new NodeAllocationResult(currentNode.getRoutingNode().node(), allocationDecision, ++weightRanking)
+                    );
+                }
+                // TODO maybe we can respect throttling here too?
+                if (allocationDecision.type().higherThan(bestDecision)) {
+                    bestDecision = allocationDecision.type();
+                    if (bestDecision == Decision.Type.YES) {
+                        targetNode = target;
+                        if (explain == false) {
+                            // we are not in explain mode and already have a YES decision on the best weighted node,
+                            // no need to continue iterating
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return MoveDecision.cannotRemain(
+            canRemain,
+            AllocationDecision.fromDecisionType(bestDecision),
+            targetNode != null ? targetNode.node() : null,
+            nodeExplanationMap
+        );
+    }
+
+    /**
+     * Builds the internal model from all shards in the given
+     * {@link Iterable}. All shards in the {@link Iterable} must be assigned
+     * to a node. This method will skip shards in the state
+     * {@link ShardRoutingState#RELOCATING} since each relocating shard has
+     * a shadow shard in the state {@link ShardRoutingState#INITIALIZING}
+     * on the target node which we respect during the allocation / balancing
+     * process. In short, this method recreates the status quo in the cluster.
+     */
+    private Map<String, BalancedShardsAllocator.ModelNode> buildModelFromAssigned() {
+        Map<String, BalancedShardsAllocator.ModelNode> nodes = new HashMap<>();
+        for (RoutingNode rn : routingNodes) {
+            BalancedShardsAllocator.ModelNode node = new BalancedShardsAllocator.ModelNode(rn);
+            nodes.put(rn.nodeId(), node);
+            for (ShardRouting shard : rn) {
+                assert rn.nodeId().equals(shard.currentNodeId());
+                /* we skip relocating shards here since we expect an initializing shard with the same id coming in */
+                if (shard.state() != RELOCATING) {
+                    node.addShard(shard);
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
+                    }
+                }
+            }
+        }
+        return nodes;
+    }
+
+    /**
+     * Allocates all given shards on the minimal eligible node for the shard's index
+     * with respect to the weight function. All given shards must be unassigned.
+     * (The ordering this relies on is illustrated in the sketch below.)
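The ordering sketch referenced above: it reproduces the primary-first, then index-name, then shard-id comparison used by allocateUnassigned (the secondary PriorityComparator is omitted), on a hypothetical stand-in type rather than the real ShardRouting:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    // Sketch of the unassigned-shard ordering; Shard is a stand-in record (Java 16+),
    // not the real ShardRouting.
    public final class UnassignedOrderDemo {
        record Shard(int id, boolean primary, String index) {}

        public static void main(String[] args) {
            List<Shard> shards = new ArrayList<>(List.of(
                new Shard(0, false, "IDX2"), new Shard(0, true, "IDX2"),
                new Shard(0, false, "IDX1"), new Shard(0, true, "IDX1"),
                new Shard(0, false, "IDX1"), new Shard(0, false, "IDX2")));

            Comparator<Shard> comparator = (o1, o2) -> {
                if (o1.primary() ^ o2.primary()) {
                    return o1.primary() ? -1 : 1; // primaries sort before replicas
                }
                int indexCmp = o1.index().compareTo(o2.index());
                return indexCmp != 0 ? indexCmp : o1.id() - o2.id();
            };
            shards.sort(comparator);
            shards.forEach(System.out::println);
            // Order matches the javadoc example:
            // (0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)
        }
    }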
+     */
+    @Override
+    void allocateUnassigned() {
+        RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
+        assert !nodes.isEmpty();
+        if (logger.isTraceEnabled()) {
+            logger.trace("Start allocating unassigned shards");
+        }
+        if (unassigned.isEmpty()) {
+            return;
+        }
+
+        /*
+         * TODO: We could be smarter here and group the shards by index and then
+         * use the sorter to save some iterations.
+         */
+        final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation);
+        final Comparator<ShardRouting> comparator = (o1, o2) -> {
+            if (o1.primary() ^ o2.primary()) {
+                return o1.primary() ? -1 : 1;
+            }
+            final int indexCmp;
+            if ((indexCmp = o1.getIndexName().compareTo(o2.getIndexName())) == 0) {
+                return o1.getId() - o2.getId();
+            }
+            // this comparator is more expensive than all the others up there
+            // that's why it's added last even though it could be easier to read
+            // if we'd apply it earlier. this comparator only differentiates across
+            // indices; all shards of the same index are treated equally.
+            final int secondary = secondaryComparator.compare(o1, o2);
+            return secondary == 0 ? indexCmp : secondary;
+        };
+        /*
+         * we use 2 arrays and move replicas to the second array once we have allocated an identical
+         * replica in the current iteration, to make sure all indices get allocated in the same manner.
+         * The arrays are sorted by primaries first and then by index and shard ID, so 2 indices with
+         * 1 shard and 2 replicas each would look like:
+         * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
+         * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
+         * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all its replicas to ignoreUnassigned.
+         */
+        ShardRouting[] primary = unassigned.drain();
+        ShardRouting[] secondary = new ShardRouting[primary.length];
+        int secondaryLength = 0;
+        int primaryLength = primary.length;
+        ArrayUtil.timSort(primary, comparator);
+        do {
+            for (int i = 0; i < primaryLength; i++) {
+                ShardRouting shard = primary[i];
+                final AllocateUnassignedDecision allocationDecision = decideAllocateUnassigned(shard);
+                final String assignedNodeId = allocationDecision.getTargetNode() != null
+                    ? allocationDecision.getTargetNode().getId()
+                    : null;
+                final BalancedShardsAllocator.ModelNode minNode = assignedNodeId != null ? nodes.get(assignedNodeId) : null;
+
+                if (allocationDecision.getAllocationDecision() == AllocationDecision.YES) {
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
+                    }
+
+                    final long shardSize = DiskThresholdDecider.getExpectedShardSize(
+                        shard,
+                        ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE,
+                        allocation.clusterInfo(),
+                        allocation.snapshotShardSizeInfo(),
+                        allocation.metadata(),
+                        allocation.routingTable()
+                    );
+                    shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes());
+                    minNode.addShard(shard);
+                    if (!shard.primary()) {
+                        // copy over the same replica shards to the secondary array so they will get allocated
+                        // in a subsequent iteration, allowing replicas of other shards to be allocated first
+                        while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) {
+                            secondary[secondaryLength++] = primary[++i];
+                        }
+                    }
+                } else {
+                    // did *not* receive a YES decision
+                    if (logger.isTraceEnabled()) {
+                        logger.trace(
+                            "No eligible node found to assign shard [{}] allocation_status [{}]",
+                            shard,
+                            allocationDecision.getAllocationStatus()
+                        );
+                    }
+
+                    if (minNode != null) {
+                        // throttle decision scenario
+                        assert allocationDecision.getAllocationStatus() == UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED;
+                        final long shardSize = DiskThresholdDecider.getExpectedShardSize(
+                            shard,
+                            ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE,
+                            allocation.clusterInfo(),
+                            allocation.snapshotShardSizeInfo(),
+                            allocation.metadata(),
+                            allocation.routingTable()
+                        );
+                        minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
+                    } else {
+                        if (logger.isTraceEnabled()) {
+                            logger.trace("No node found to assign shard [{}]", shard);
+                        }
+                    }
+
+                    unassigned.ignoreShard(shard, allocationDecision.getAllocationStatus(), allocation.changes());
+                    if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
+                        while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) {
+                            unassigned.ignoreShard(primary[++i], allocationDecision.getAllocationStatus(), allocation.changes());
+                        }
+                    }
+                }
+            }
+            primaryLength = secondaryLength;
+            ShardRouting[] tmp = primary;
+            primary = secondary;
+            secondary = tmp;
+            secondaryLength = 0;
+        } while (primaryLength > 0);
+        // we are done: every shard has either been allocated or moved to ignoreUnassigned
+    }
+
+    /**
+     * Make a decision for allocating an unassigned shard. This method returns two values in a tuple: the
+     * first value is the {@link Decision} taken to allocate the unassigned shard, the second value is the
+     * {@link BalancedShardsAllocator.ModelNode} representing the node that the shard should be assigned to. If the decision returned
+     * is of type {@link Decision.Type#NO}, then the assigned node will be null. (A condensed sketch of the
+     * node selection follows below.)
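The condensed sketch referenced above: stripped of explain mode and throttling, the node selection in decideAllocateUnassigned reduces to an argmin over decider-approved nodes. All types and names here are hypothetical stand-ins, not the patch's API:

    import java.util.List;

    // Condensed argmin sketch of the node selection; CandidateNode stands in for ModelNode,
    // canAllocate for the decider chain (Java 16+ record).
    public final class MinWeightPickDemo {
        record CandidateNode(String id, float weight, boolean canAllocate) {}

        static CandidateNode pick(List<CandidateNode> nodes) {
            CandidateNode min = null;
            float minWeight = Float.POSITIVE_INFINITY;
            for (CandidateNode node : nodes) {
                if (!node.canAllocate()) continue; // deciders returned NO
                if (node.weight() < minWeight) {   // a strictly better weight wins
                    min = node;
                    minWeight = node.weight();
                }
            }
            return min; // null mirrors the "no node assigned" NO decision
        }

        public static void main(String[] args) {
            System.out.println(pick(List.of(
                new CandidateNode("n1", 2.0f, true),
                new CandidateNode("n2", 1.0f, false),    // lightest, but deciders say NO
                new CandidateNode("n3", 1.5f, true))));  // chosen
        }
    }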
+ */ + @Override + AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { + if (shard.assignedToNode()) { + // we only make decisions for unassigned shards here + return AllocateUnassignedDecision.NOT_TAKEN; + } + + final boolean explain = allocation.debugDecision(); + Decision shardLevelDecision = allocation.deciders().canAllocate(shard, allocation); + if (shardLevelDecision.type() == Decision.Type.NO && explain == false) { + // NO decision for allocating the shard, irrespective of any particular node, so exit early + return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null); + } + + /* find a node with minimal weight we can allocate on */ + float minWeight = Float.POSITIVE_INFINITY; + BalancedShardsAllocator.ModelNode minNode = null; + Decision decision = null; + /* Don't iterate over an identity hashset here; the + * iteration order is different for each run and makes testing hard */ + Map<String, NodeAllocationResult> nodeExplanationMap = explain ? new HashMap<>() : null; + List<Tuple<String, Float>> nodeWeights = explain ? new ArrayList<>() : null; + for (BalancedShardsAllocator.ModelNode node : nodes.values()) { + if (node.containsShard(shard) && explain == false) { + // decision is NO without needing to check anything further, so short circuit + continue; + } + + // weight of this index currently on the node + float currentWeight = weight.weightWithAllocationConstraints(this, node, shard.getIndexName()); + // allocating the shard to this node would not improve the balance, and we are not in explain mode, so short circuit + if (currentWeight > minWeight && explain == false) { + continue; + } + + Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation); + if (explain) { + nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0)); + nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight)); + } + if (currentDecision.type() == Decision.Type.YES || currentDecision.type() == Decision.Type.THROTTLE) { + final boolean updateMinNode; + if (currentWeight == minWeight) { + /* we have an equal weight tie breaking: + * 1. if one decision is YES prefer it + * 2. prefer the node that holds the primary for this index with the next id in the ring i.e. + * for the 3 shards 2 replica case we try to build up: + * 1 2 0 + * 2 0 1 + * 0 1 2 + * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater + * than the id of the shard we need to assign. This works fine when new indices are created since + * primaries are added first and we only add one shard set at a time in this algorithm.
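The equal-weight tie-break above is dense, so the following standalone sketch (an editorial aid, not code from this patch) extracts the boolean expression into a pure function and evaluates it for a few sample inputs:

```java
public class TieBreakSketch {
    // true if a candidate node whose highest primary id for the index is `nodeHigh`
    // should replace the current best node (highest primary id `minNodeHigh`)
    // when assigning the replica with id `repId`; copied from the expression above
    static boolean preferCandidate(int repId, int nodeHigh, int minNodeHigh) {
        return ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId))
            && (nodeHigh < minNodeHigh)) || (nodeHigh > repId && minNodeHigh < repId));
    }

    public static void main(String[] args) {
        // assigning replica 1: the node holding primary 2 (the next id in the ring)
        // beats the node holding primary 0
        System.out.println(preferCandidate(1, 2, 0)); // true  -> switch nodes
        System.out.println(preferCandidate(1, 0, 2)); // false -> keep current node
        // both candidates hold a primary with a greater id: the closer one wins
        System.out.println(preferCandidate(0, 1, 2)); // true
    }
}
```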
+ */ + if (currentDecision.type() == decision.type()) { + final int repId = shard.id(); + final int nodeHigh = node.highestPrimary(shard.index().getName()); + final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); + updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) + && (nodeHigh < minNodeHigh)) || (nodeHigh > repId && minNodeHigh < repId)); + } else { + updateMinNode = currentDecision.type() == Decision.Type.YES; + } + } else { + updateMinNode = currentWeight < minWeight; + } + if (updateMinNode) { + minNode = node; + minWeight = currentWeight; + decision = currentDecision; + } + } + } + if (decision == null) { + // decision was not set and a node was not assigned, so treat it as a NO decision + decision = Decision.NO; + } + List<NodeAllocationResult> nodeDecisions = null; + if (explain) { + nodeDecisions = new ArrayList<>(); + // fill in the correct weight ranking, once we've been through all nodes + nodeWeights.sort((nodeWeight1, nodeWeight2) -> Float.compare(nodeWeight1.v2(), nodeWeight2.v2())); + int weightRanking = 0; + for (Tuple<String, Float> nodeWeight : nodeWeights) { + NodeAllocationResult current = nodeExplanationMap.get(nodeWeight.v1()); + nodeDecisions.add(new NodeAllocationResult(current.getNode(), current.getCanAllocateDecision(), ++weightRanking)); + } + } + return AllocateUnassignedDecision.fromDecision(decision, minNode != null ? minNode.getRoutingNode().node() : null, nodeDecisions); + } + + private static final Comparator<ShardRouting> BY_DESCENDING_SHARD_ID = Comparator.comparing(ShardRouting::shardId).reversed(); + + /** + * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the + * balance model. Iff this method returns true, the relocation has already been executed on the + * simulation model as well as on the cluster.
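The body of tryRelocateShard follows below. As a quick reference, this standalone sketch (editorial, not patch code) shows how the combined decision built via Decision.Multi resolves, assuming the usual weakest-decision-wins semantics; NO outcomes have already been filtered out with continue by the time the two decisions are combined:

```java
public class RelocationDecisionSketch {
    enum Type { NO, THROTTLE, YES }

    // mirrors new Decision.Multi().add(allocationDecision).add(rebalanceDecision)
    static Type combine(Type rebalanceDecision, Type allocationDecision) {
        if (rebalanceDecision == Type.NO || allocationDecision == Type.NO) return Type.NO;
        if (rebalanceDecision == Type.THROTTLE || allocationDecision == Type.THROTTLE) return Type.THROTTLE;
        return Type.YES;
    }

    public static void main(String[] args) {
        System.out.println(combine(Type.YES, Type.YES));      // YES      -> relocate on the cluster
        System.out.println(combine(Type.YES, Type.THROTTLE)); // THROTTLE -> relocate on the model only
    }
}
```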
+ */ + private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, BalancedShardsAllocator.ModelNode maxNode, String idx) { + final BalancedShardsAllocator.ModelIndex index = maxNode.getIndex(idx); + if (index != null) { + logger.trace("Try relocating shard of [{}] from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); + final Iterable<ShardRouting> shardRoutings = StreamSupport.stream(index.spliterator(), false) + .filter(ShardRouting::started) // cannot rebalance unassigned, initializing or relocating shards anyway + .filter(maxNode::containsShard) + .sorted(BY_DESCENDING_SHARD_ID) // check in descending order of shard id so that the decision is deterministic + ::iterator; + + final AllocationDeciders deciders = allocation.deciders(); + for (ShardRouting shard : shardRoutings) { + final Decision rebalanceDecision = deciders.canRebalance(shard, allocation); + if (rebalanceDecision.type() == Decision.Type.NO) { + continue; + } + final Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation); + if (allocationDecision.type() == Decision.Type.NO) { + continue; + } + + final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); + + maxNode.removeShard(shard); + long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + + if (decision.type() == Decision.Type.YES) { + /* only allocate on the cluster if we are not throttled */ + logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); + minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); + return true; + } else { + /* allocate on the model even if throttled */ + logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); + assert decision.type() == Decision.Type.THROTTLE; + minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); + return false; + } + } + } + logger.trace("No shards of [{}] can relocate from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); + return false; + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java new file mode 100644 index 0000000000000..593e6998141fb --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.allocator; + +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.MoveDecision; + +/** + *
<p>
    + * A {@link ShardsBalancer} helps the {@link BalancedShardsAllocator} to perform allocation and balancing + * operations on the cluster. + *
</p>
+ * + * @opensearch.internal + */ +public abstract class ShardsBalancer { + + /** + * Performs allocation of unassigned shards on nodes within the cluster. + */ + abstract void allocateUnassigned(); + + /** + * Moves shards that cannot be allocated to a node anymore. + */ + abstract void moveShards(); + + /** + * Balances the nodes on the cluster model. + */ + abstract void balance(); + + /** + * Makes a decision for allocating an unassigned shard. + * @param shardRouting the shard for which the decision has to be made + * @return the allocation decision + */ + abstract AllocateUnassignedDecision decideAllocateUnassigned(ShardRouting shardRouting); + + /** + * Makes a decision on whether to move a started shard to another node. + * @param shardRouting the shard for which the decision has to be made + * @return a move decision for the shard + */ + abstract MoveDecision decideMove(ShardRouting shardRouting); + + /** + * Makes a decision about moving a single shard to a different node to form a more + * optimally balanced cluster. + * @param shardRouting the shard for which the move decision has to be made + * @return a move decision for the shard + */ + abstract MoveDecision decideRebalance(ShardRouting shardRouting); + + /** + * Returns the global average of shards per node + */ + public float avgShardsPerNode() { + return Float.MAX_VALUE; + } + + /** + * Returns the average of shards per node for the given index + */ + public float avgShardsPerNode(String index) { + return Float.MAX_VALUE; + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java index d115ee0c515cc..ae10a92a5104e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java @@ -10,6 +10,8 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.allocator.LocalShardsBalancer; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -45,7 +47,7 @@ public void testSettings() { * for IndexShardPerNode constraint satisfied and breached.
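Because ShardsBalancer is introduced as a public abstract base class, alternative balancing strategies can be built on top of it. The sketch below is hypothetical (NoopShardsBalancer is an invented name, and MoveDecision.NOT_TAKEN is assumed to exist as the counterpart of AllocateUnassignedDecision.NOT_TAKEN used elsewhere in this patch); note the abstract methods are package-private, so such an implementation would have to live in the same package:

```java
package org.opensearch.cluster.routing.allocation.allocator;

import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.opensearch.cluster.routing.allocation.MoveDecision;

// a balancer that deliberately does nothing, useful only as a structural example
class NoopShardsBalancer extends ShardsBalancer {
    @Override
    void allocateUnassigned() {
        // leave every shard unassigned
    }

    @Override
    void moveShards() {
        // never move started shards
    }

    @Override
    void balance() {
        // never rebalance the cluster model
    }

    @Override
    AllocateUnassignedDecision decideAllocateUnassigned(ShardRouting shardRouting) {
        return AllocateUnassignedDecision.NOT_TAKEN;
    }

    @Override
    MoveDecision decideMove(ShardRouting shardRouting) {
        return MoveDecision.NOT_TAKEN;
    }

    @Override
    MoveDecision decideRebalance(ShardRouting shardRouting) {
        return MoveDecision.NOT_TAKEN;
    }
}
```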
*/ public void testIndexShardsPerNodeConstraint() { - BalancedShardsAllocator.Balancer balancer = mock(BalancedShardsAllocator.Balancer.class); + ShardsBalancer balancer = mock(LocalShardsBalancer.class); BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); AllocationConstraints constraints = new AllocationConstraints(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java index a7b53a4c4bc8b..d29249cef0818 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -43,7 +43,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; @@ -65,7 +65,7 @@ import static org.hamcrest.Matchers.startsWith; /** - * Tests for balancing a single shard, see {@link Balancer#decideRebalance(ShardRouting)}. + * Tests for balancing a single shard, see {@link ShardsBalancer#decideRebalance(ShardRouting)}. */ public class BalancedSingleShardTests extends OpenSearchAllocationTestCase { From cdc7a2fbe8c06a0ab11b35bd2142ca3ecdc0a0ba Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 18 Oct 2022 12:33:43 +0530 Subject: [PATCH 32/39] Fix decommission status update to non leader nodes (#4800) * Fix decommission status update to non leader nodes Signed-off-by: Rishab Nahata --- CHANGELOG.md | 2 + .../AwarenessAttributeDecommissionIT.java | 164 ++++++++++++++++++ .../DecommissionAttributeMetadata.java | 21 +-- .../decommission/DecommissionController.java | 8 +- 4 files changed, 179 insertions(+), 16 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f6e5425f37db..bc22e66fe11ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -138,7 +138,9 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) +- Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) + ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java new file mode 100644 index 0000000000000..b8318503ee4a5 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.junit.After; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.NodeRoles.onlyRole; +import static org.opensearch.test.OpenSearchIntegTestCase.client; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AwarenessAttributeDecommissionIT extends OpenSearchIntegTestCase { + private final Logger logger = LogManager.getLogger(AwarenessAttributeDecommissionIT.class); + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(MockTransportService.TestPlugin.class); + } + + @After + public void cleanup() throws Exception { + assertNoTimeout(client().admin().cluster().prepareHealth().get()); + } + + public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionException, InterruptedException { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List<String> clusterManagerNodes =
internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build() + ); + + logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); + List<String> dataNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build() + ); + + ensureStableCluster(6); + + logger.info("--> starting decommissioning nodes in zone {}", 'c'); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); + assertTrue(decommissionResponse.isAcknowledged()); + + // Will wait for all events to complete + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + // assert that decommission status is successful + GetDecommissionStateResponse response = client().execute(GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest()) + .get(); + assertEquals(response.getDecommissionedAttribute(), decommissionAttribute); + assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); + + ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); + assertEquals(4, clusterState.nodes().getSize()); + + // assert status on nodes that are part of cluster currently + Iterator<DiscoveryNode> discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); + while (discoveryNodeIterator.hasNext()) { + // assert no node has decommissioned attribute + DiscoveryNode node = discoveryNodeIterator.next(); + assertNotEquals(node.getAttributes().get("zone"), "c"); + + // assert all the nodes have status as SUCCESSFUL + ClusterService localNodeClusterService = internalCluster().getInstance(ClusterService.class, node.getName()); + assertEquals( + localNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), + DecommissionStatus.SUCCESSFUL + ); + } + + // assert status on decommissioned node + // Here we will verify that until it got kicked out, it received appropriate status updates + // the decommissioned node hence will have status as IN_PROGRESS, as it will be kicked out later after this + // and won't receive the status update to SUCCESSFUL + String randomDecommissionedNode = randomFrom(clusterManagerNodes.get(2), dataNodes.get(2)); + ClusterService decommissionedNodeClusterService = internalCluster().getInstance(ClusterService.class, randomDecommissionedNode); + assertEquals( + decommissionedNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), + DecommissionStatus.IN_PROGRESS + ); + + // Will wait for all
events to complete + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + // Recommission the zone back so that the test concludes gracefully once the above assertions succeed + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodes.get(0)).execute( + DeleteDecommissionStateAction.INSTANCE, + new DeleteDecommissionStateRequest() + ).get(); + assertTrue(deleteDecommissionStateResponse.isAcknowledged()); + + // will wait for cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes) + // as by then all nodes should have joined the cluster + ensureStableCluster(6, TimeValue.timeValueMinutes(2)); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java index dbb3fea823eb6..d3d508bf36451 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -79,35 +79,29 @@ public DecommissionStatus status() { /** * Returns instance of the metadata with updated status * @param newStatus status to be updated with - * @return instance with valid status */ // synchronized is strictly speaking not needed (this is called by a single thread), but just to be safe - public synchronized DecommissionAttributeMetadata setUpdatedStatus(DecommissionStatus newStatus) { - // if the current status is the expected status already, we return the same instance - if (newStatus.equals(status)) { - return this; + public synchronized void validateNewStatus(DecommissionStatus newStatus) { + // if the current status is the expected status already or new status is FAILED, we let the check pass + if (newStatus.equals(status) || newStatus.equals(DecommissionStatus.FAILED)) { + return; } // We don't expect that INIT will be new status, as it is registered only when starting the decommission action switch (newStatus) { case IN_PROGRESS: - validateAndSetStatus(DecommissionStatus.INIT, newStatus); + validateStatus(DecommissionStatus.INIT, newStatus); break; case SUCCESSFUL: - validateAndSetStatus(DecommissionStatus.IN_PROGRESS, newStatus); - break; - case FAILED: - // we don't need to validate here and directly update status to FAILED - this.status = newStatus; break; default: throw new IllegalArgumentException( "illegal decommission status [" + newStatus.status() + "] requested for updating metadata" ); } - return this; } - private void validateAndSetStatus(DecommissionStatus expected, DecommissionStatus next) { + private void validateStatus(DecommissionStatus expected, DecommissionStatus next) { if (status.equals(expected) == false) { assert false : "can't move decommission status to [" + next
current status: [" + status + "] (expected [" + expected + "])" ); } - status = next; } @Override diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java index 7719012f2f3d7..b58d99a9d59db 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java @@ -246,8 +246,12 @@ public ClusterState execute(ClusterState currentState) { decommissionAttributeMetadata.status(), decommissionStatus ); - // setUpdatedStatus can throw IllegalStateException if the sequence of update is not valid - decommissionAttributeMetadata.setUpdatedStatus(decommissionStatus); + // validateNewStatus can throw IllegalStateException if the sequence of update is not valid + decommissionAttributeMetadata.validateNewStatus(decommissionStatus); + decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttributeMetadata.decommissionAttribute(), + decommissionStatus + ); return ClusterState.builder(currentState) .metadata(Metadata.builder(currentState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) .build(); From f3d038f2703f7928dc8b43d9a8cdc0d4ecbd528a Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 18 Oct 2022 18:23:51 +0530 Subject: [PATCH 33/39] Remove redundant field from GetDecommissionStateResponse (#4751) * Add attribute name to query param and simplify GetDecommissionStateResponse Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../cluster.get_decommission_awareness.json | 12 ++- .../get/GetDecommissionStateRequest.java | 42 +++++++- .../GetDecommissionStateRequestBuilder.java | 9 ++ .../get/GetDecommissionStateResponse.java | 101 +++++------------- .../TransportGetDecommissionStateAction.java | 5 +- .../opensearch/client/ClusterAdminClient.java | 6 +- .../client/support/AbstractClient.java | 6 +- .../RestGetDecommissionStateAction.java | 6 +- .../get/GetDecommissionStateRequestTests.java | 50 +++++++++ .../GetDecommissionStateResponseTests.java | 13 +-- 11 files changed, 158 insertions(+), 93 deletions(-) create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index bc22e66fe11ad..646151674f9f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -136,6 +136,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) +- [BUG]: Remove redundant field from GetDecommissionStateResponse ([#4751](https://github.com/opensearch-project/OpenSearch/pull/4751)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) - Fix decommission status update to non leader nodes 
([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json index 430f96921fbc2..302dea4ec31a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json @@ -8,10 +8,16 @@ "url": { "paths": [ { - "path": "/_cluster/decommission/awareness/_status", - "methods": [ + "path":"/_cluster/decommission/awareness/{awareness_attribute_name}/_status", + "methods":[ "GET" - ] + ], + "parts":{ + "awareness_attribute_name":{ + "type":"string", + "description":"Awareness attribute name" + } + } } ] } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java index 90150c71bf3f2..1f301aa2b5273 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -10,11 +10,14 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import java.io.IOException; +import static org.opensearch.action.ValidateActions.addValidationError; + /** * Get Decommissioned attribute request * @@ -22,19 +25,56 @@ */ public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest<GetDecommissionStateRequest> { + private String attributeName; + public GetDecommissionStateRequest() {} + /** + * Constructs a new get decommission state request with given attribute name + * + * @param attributeName name of the attribute + */ + public GetDecommissionStateRequest(String attributeName) { + this.attributeName = attributeName; + } + public GetDecommissionStateRequest(StreamInput in) throws IOException { super(in); + attributeName = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeString(attributeName); } @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = null; + if (attributeName == null || Strings.isEmpty(attributeName)) { + validationException = addValidationError("attribute name is missing", validationException); + } + return validationException; + } + + /** + * Sets attribute name + * + * @param attributeName attribute name + * @return this request + */ + public GetDecommissionStateRequest attributeName(String attributeName) { + this.attributeName = attributeName; + return this; + } + + /** + * Returns attribute name + * + * @return attributeName name of attribute + */ + public String attributeName() { + return this.attributeName; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java index 2b8616d0511cd..e766e9c674ff7 100644 ---
a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -27,4 +27,13 @@ public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOp public GetDecommissionStateRequestBuilder(OpenSearchClient client, GetDecommissionStateAction action) { super(client, action, new GetDecommissionStateRequest()); } + + /** + * @param attributeName name of attribute + * @return current object + */ + public GetDecommissionStateRequestBuilder setAttributeName(String attributeName) { + request.attributeName(attributeName); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java index 2034cdb16e40f..ec0bd7cf7e7eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -10,7 +10,6 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionResponse; -import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -31,49 +30,40 @@ */ public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { - private DecommissionAttribute decommissionedAttribute; + private String attributeValue; private DecommissionStatus status; GetDecommissionStateResponse() { this(null, null); } - GetDecommissionStateResponse(DecommissionAttribute decommissionedAttribute, DecommissionStatus status) { - this.decommissionedAttribute = decommissionedAttribute; + GetDecommissionStateResponse(String attributeValue, DecommissionStatus status) { + this.attributeValue = attributeValue; this.status = status; } GetDecommissionStateResponse(StreamInput in) throws IOException { // read decommissioned attribute and status only if it is present if (in.readBoolean()) { - this.decommissionedAttribute = new DecommissionAttribute(in); - } - if (in.readBoolean()) { + this.attributeValue = in.readString(); this.status = DecommissionStatus.fromString(in.readString()); } } @Override public void writeTo(StreamOutput out) throws IOException { - // if decommissioned attribute is null, mark absence of decommissioned attribute - if (decommissionedAttribute == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - decommissionedAttribute.writeTo(out); - } - - // if status is null, mark absence of status - if (status == null) { + // if decommissioned attribute value is null or status is null then mark its absence + if (attributeValue == null || status == null) { out.writeBoolean(false); } else { out.writeBoolean(true); + out.writeString(attributeValue); out.writeString(status.status()); } } - public DecommissionAttribute getDecommissionedAttribute() { - return decommissionedAttribute; + public String getAttributeValue() { + return attributeValue; } public DecommissionStatus getDecommissionStatus() { @@ -83,13 +73,8 @@ public DecommissionStatus getDecommissionStatus() { @Override public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject("awareness"); - if (decommissionedAttribute != null) { - builder.field(decommissionedAttribute.attributeName(), decommissionedAttribute.attributeValue()); - } - builder.endObject(); - if (status != null) { - builder.field("status", status); + if (attributeValue != null && status != null) { + builder.field(attributeValue, status); } builder.endObject(); return builder; @@ -97,58 +82,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GetDecommissionStateResponse fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - String attributeType = "awareness"; XContentParser.Token token; - DecommissionAttribute decommissionAttribute = null; + String attributeValue = null; DecommissionStatus status = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if (attributeType.equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new OpenSearchParseException( - "failed to parse decommission attribute type [{}], expected object", - attributeType - ); - } - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String fieldName = parser.currentName(); - String value; - token = parser.nextToken(); - if (token == XContentParser.Token.VALUE_STRING) { - value = parser.text(); - } else { - throw new OpenSearchParseException( - "failed to parse attribute [{}], expected string for attribute value", - fieldName - ); - } - decommissionAttribute = new DecommissionAttribute(fieldName, value); - parser.nextToken(); - } else { - throw new OpenSearchParseException("failed to parse attribute type [{}], unexpected type", attributeType); - } - } else { - throw new OpenSearchParseException("failed to parse attribute type [{}]", attributeType); - } - } else if ("status".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new OpenSearchParseException( - "failed to parse status of decommissioning, expected string but found unknown type" - ); - } - status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); - } else { - throw new OpenSearchParseException( - "unknown field found [{}], failed to parse the decommission attribute", - currentFieldName - ); + attributeValue = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse status of decommissioning, expected string but found unknown type"); } + status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); + } else { + throw new OpenSearchParseException( + "failed to parse decommission state, expected [{}] but found [{}]", + XContentParser.Token.FIELD_NAME, + token + ); } } - return new GetDecommissionStateResponse(decommissionAttribute, status); + return new GetDecommissionStateResponse(attributeValue, status); } @Override @@ -156,11 +108,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; GetDecommissionStateResponse that = (GetDecommissionStateResponse) o; - return decommissionedAttribute.equals(that.decommissionedAttribute) 
&& status == that.status; + if (!Objects.equals(attributeValue, that.attributeValue)) { + return false; + } + return Objects.equals(status, that.status); } @Override public int hashCode() { - return Objects.hash(decommissionedAttribute, status); + return Objects.hash(attributeValue, status); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java index 48ed13c6c0aaf..d811ab8cf6948 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java @@ -69,10 +69,11 @@ protected void clusterManagerOperation( ActionListener<GetDecommissionStateResponse> listener ) throws Exception { DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { + if (decommissionAttributeMetadata != null + && request.attributeName().equals(decommissionAttributeMetadata.decommissionAttribute().attributeName())) { listener.onResponse( new GetDecommissionStateResponse( - decommissionAttributeMetadata.decommissionAttribute(), + decommissionAttributeMetadata.decommissionAttribute().attributeValue(), decommissionAttributeMetadata.status() ) ); diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 77ddb5e17c742..4ab438ec064f1 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -873,17 +873,17 @@ public interface ClusterAdminClient extends OpenSearchClient { /** * Get Decommissioned attribute */ - ActionFuture<GetDecommissionStateResponse> getDecommission(GetDecommissionStateRequest request); + ActionFuture<GetDecommissionStateResponse> getDecommissionState(GetDecommissionStateRequest request); /** * Get Decommissioned attribute */ - void getDecommission(GetDecommissionStateRequest request, ActionListener<GetDecommissionStateResponse> listener); + void getDecommissionState(GetDecommissionStateRequest request, ActionListener<GetDecommissionStateResponse> listener); /** * Get Decommissioned attribute */ - GetDecommissionStateRequestBuilder prepareGetDecommission(); + GetDecommissionStateRequestBuilder prepareGetDecommissionState(); /** * Deletes the decommission metadata.
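A hedged usage sketch for the renamed client entry points (the method names and the setAttributeName builder come from the diffs above; the wrapper class and the "zone" attribute name are invented for illustration):

```java
import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.decommission.DecommissionStatus;

public class GetDecommissionStateUsageSketch {
    // fetches the decommission status recorded for the "zone" awareness attribute
    static DecommissionStatus statusForZone(Client client) {
        GetDecommissionStateResponse response = client.admin()
            .cluster()
            .prepareGetDecommissionState()
            .setAttributeName("zone")
            .get();
        return response.getDecommissionStatus();
    }
}
```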
diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index b42010d4253d5..828ca5f8083ee 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -1417,17 +1417,17 @@ public DecommissionRequestBuilder prepareDecommission(DecommissionRequest reques } @Override - public ActionFuture<GetDecommissionStateResponse> getDecommission(GetDecommissionStateRequest request) { + public ActionFuture<GetDecommissionStateResponse> getDecommissionState(GetDecommissionStateRequest request) { return execute(GetDecommissionStateAction.INSTANCE, request); } @Override - public void getDecommission(GetDecommissionStateRequest request, ActionListener<GetDecommissionStateResponse> listener) { + public void getDecommissionState(GetDecommissionStateRequest request, ActionListener<GetDecommissionStateResponse> listener) { execute(GetDecommissionStateAction.INSTANCE, request, listener); } @Override - public GetDecommissionStateRequestBuilder prepareGetDecommission() { + public GetDecommissionStateRequestBuilder prepareGetDecommissionState() { return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java index 8bc89ebf37960..5d72adbd6ae08 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java @@ -30,7 +30,7 @@ public class RestGetDecommissionStateAction extends BaseRestHandler { @Override public List<Route> routes() { - return singletonList(new Route(GET, "/_cluster/decommission/awareness/_status")); + return singletonList(new Route(GET, "/_cluster/decommission/awareness/{awareness_attribute_name}/_status")); } @Override @@ -41,6 +41,8 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { GetDecommissionStateRequest getDecommissionStateRequest = Requests.getDecommissionStateRequest(); - return channel -> client.admin().cluster().getDecommission(getDecommissionStateRequest, new RestToXContentListener<>(channel)); + String attributeName = request.param("awareness_attribute_name"); + getDecommissionStateRequest.attributeName(attributeName); + return channel -> client.admin().cluster().getDecommissionState(getDecommissionStateRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java new file mode 100644 index 0000000000000..973485e1917f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class GetDecommissionStateRequestTests extends OpenSearchTestCase { + public void testSerialization() throws IOException { + String attributeName = "zone"; + final GetDecommissionStateRequest originalRequest = new GetDecommissionStateRequest(attributeName); + final GetDecommissionStateRequest deserialized = copyWriteable( + originalRequest, + writableRegistry(), + GetDecommissionStateRequest::new + ); + assertEquals(deserialized.attributeName(), originalRequest.attributeName()); + } + + public void testValidation() { + { + String attributeName = null; + final GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute name is missing")); + } + { + String attributeName = ""; + final GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute name is missing")); + } + { + String attributeName = "zone"; + final GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName); + ActionRequestValidationException e = request.validate(); + assertNull(e); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java index 97bc54d8d7b30..437faf2a75720 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.decommission.awareness.get; -import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.test.AbstractXContentTestCase; @@ -18,11 +17,13 @@ public class GetDecommissionStateResponseTests extends AbstractXContentTestCase<GetDecommissionStateResponse> { @Override protected GetDecommissionStateResponse createTestInstance() { - DecommissionStatus status = randomFrom(DecommissionStatus.values()); - String attributeName = randomAlphaOfLength(10); - String attributeValue = randomAlphaOfLength(10); - DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); - return new GetDecommissionStateResponse(decommissionAttribute, status); + DecommissionStatus status = null; + String attributeValue = null; + if (randomBoolean()) { + status = randomFrom(DecommissionStatus.values()); + attributeValue = randomAlphaOfLength(10); + } + return new GetDecommissionStateResponse(attributeValue, status); } @Override From b19429b97d06876408474e11287be1a8e6131d58 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 18 Oct 2022 21:09:01 +0530 Subject: [PATCH 34/39] Fix bug in AwarenessAttributeDecommissionIT (#4822) * Fix bug in AwarenessAttributeDecommissionIT Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 +
.../coordination/AwarenessAttributeDecommissionIT.java | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 646151674f9f4..c7f5ad1c95ab4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -141,6 +141,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) - Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) +- Fix bug in AwarenessAttributeDecommissionIT ([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index b8318503ee4a5..a2270d63ba6fa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -40,7 +40,6 @@ import java.util.concurrent.ExecutionException; import static org.opensearch.test.NodeRoles.onlyRole; -import static org.opensearch.test.OpenSearchIntegTestCase.client; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -113,9 +112,11 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // assert that decommission status is successful - GetDecommissionStateResponse response = client().execute(GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest()) - .get(); - assertEquals(response.getDecommissionedAttribute(), decommissionAttribute); + GetDecommissionStateResponse response = client().execute( + GetDecommissionStateAction.INSTANCE, + new GetDecommissionStateRequest(decommissionAttribute.attributeName()) + ).get(); + assertEquals(response.getAttributeValue(), decommissionAttribute.attributeValue()); assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); From f0b759d19fe0b2650cf3e51ae639d621e5c041c7 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 18 Oct 2022 14:04:26 -0700 Subject: [PATCH 35/39] Exclude jettison version brought in with hadoop-minicluster.
(#4787) Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + buildSrc/version.properties | 1 + plugins/discovery-azure-classic/build.gradle | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 ++ 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7f5ad1c95ab4..1304f8bb203d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) - Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) +- Exclude jettison version brought in with hadoop-minicluster. ([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 08784c82a4cc4..db0e5b142f7de 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -17,6 +17,7 @@ supercsv = 2.4.0 log4j = 2.17.1 slf4j = 1.7.36 asm = 9.3 +jettison = 1.5.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.12.1 diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 8ca9491f834a6..c88d19f0e2806 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.5.1' + api "org.codehaus.jettison:jettison:${versions.jettison}" api 'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index d01db6376db42..03455adc8033d 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -35,7 +35,9 @@ group = 'hdfs' dependencies { api("org.apache.hadoop:hadoop-minicluster:3.3.4") { exclude module: 'websocket-client' + exclude module: 'jettison' } + api "org.codehaus.jettison:jettison:${versions.jettison}" api "org.apache.commons:commons-compress:1.21" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" From fce34ce68d7f86f8550036e9a51ff58223beb130 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 18 Oct 2022 15:59:08 -0700 Subject: [PATCH 36/39] Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs. (#4790) Signed-off-by: Marc Handalian Add missing SHAs. 
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 4 +++- plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 | 1 - plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 | 1 + plugins/repository-hdfs/build.gradle | 2 +- .../repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 | 1 - .../repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 | 1 + 7 files changed, 7 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1304f8bb203d1..15f3e1e5cdd2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,7 +41,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 - Bumps `forbiddenapis` from 3.3 to 3.4 - Bumps `gson` from 2.9.0 to 2.9.1 -- Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 - Bumps `azure-core` from 1.31.0 to 1.33.0 - Bumps `avro` from 1.11.0 to 1.11.1 - Bumps `woodstox-core` from 6.3.0 to 6.3.1 @@ -60,6 +60,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) - Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) - Exclude jettison version brought in with hadoop-minicluster.
([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) +- Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#]()) + ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 097e96fcd8fdc..05e879547a4b0 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -66,7 +66,7 @@ dependencies { api 'com.google.api:gax:2.17.0' api 'org.threeten:threetenbp:1.4.4' api 'com.google.protobuf:protobuf-java-util:3.20.0' - api 'com.google.protobuf:protobuf-java:3.19.3' + api 'com.google.protobuf:protobuf-java:3.21.7' api 'com.google.code.gson:gson:2.9.0' api 'com.google.api.grpc:proto-google-common-protos:2.8.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 deleted file mode 100644 index 655ecd1f1c1c9..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b57f1b1b9e281231c3fcfc039ce3021e29ff570 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 new file mode 100644 index 0000000000000..faa673a23ef41 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 @@ -0,0 +1 @@ +96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 792bdc6bacd4a..07dc8b8547b80 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -69,7 +69,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.4' + api 'com.google.protobuf:protobuf-java:3.21.7' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 deleted file mode 100644 index f232c9a449547..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9947febd7a6d0695726c78f603a149b7b7c108e0 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 new file mode 100644 index 0000000000000..faa673a23ef41 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 @@ -0,0 +1 @@ +96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file From 49363fb06a4cbd48e6baa90f53b74764fb8ec155 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 18 Oct 2022 19:01:27 -0500 Subject: [PATCH 37/39] Disable merge on refresh in DiskThresholdDeciderIT (#4828) Disables merge on refresh for DiskThresholdDeciderIT.testRestoreSnapshotAllocationDoesNotExceedWatermark. 
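In isolation, the one-line test change below amounts to the following settings fragment (an editorial sketch; the reading that merge-on-flush is disabled to keep segment counts and shard sizes predictable for the watermark assertions is ours, not spelled out in the commit):

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexSettings;

public class MergeOnFlushSketch {
    // index settings with merge-on-refresh explicitly disabled
    static Settings withoutMergeOnFlush() {
        return Settings.builder()
            .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false)
            .build();
    }
}
```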
                        Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../routing/allocation/decider/DiskThresholdDeciderIT.java | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15f3e1e5cdd2a..f592ee04e55b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -166,6 +166,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) - Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) +- Disable merge on refresh in DiskThresholdDeciderIT ([#4828](https://github.com/opensearch-project/OpenSearch/pull/4828)) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 10e809e2fb5dc..955f0f0465d88 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -217,6 +217,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6) .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms") + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false) .build() ); final long minShardSize = createReasonableSizedShards(indexName); From eb3301554ec7e1a07866508ffc390b8651dc3394 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Wed, 19 Oct 2022 02:27:08 +0200 Subject: [PATCH 38/39] Add groupId value propagation tests for ZIP publication task (#4772) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The groupId can be defined on several levels. This commit adds more tests to cover the "edge" cases. - In one case the groupId is inherited from the topmost 'allprojects' section (and thus can be missing in the publications section). - The other case is the opposite: it tests that if the groupId is defined on several levels then the most internal level outweighs the other levels. Closes: #4771 Signed-off-by: Lukáš Vlček Signed-off-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../gradle/pluginzip/PublishTests.java | 70 +++++++++++++++++++ .../pluginzip/allProjectsGroup.gradle | 28 ++++++++ .../pluginzip/groupPriorityLevel.gradle | 30 ++++++++ 4 files changed, 129 insertions(+) create mode 100644 buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle diff --git a/CHANGELOG.md b/CHANGELOG.md index f592ee04e55b9..4c4c7c80e5410 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommissioning of zone. REST layer support.
                        
                        ([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) +- Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 2ca0e507acb44..148a836f32b41 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -271,6 +271,76 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } + /** + * If the `group` is defined in gradle's allprojects section then it does not have to be defined in publications. + */ + @Test + public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate default values + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "opensearch", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "org.opensearch"); + } + + /** + * The groupId value can be defined on several levels. This tests that the most internal level outweighs other levels. + */ + @Test + public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate default values + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "level", + "3", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "level.3"); + } + /** * In this case the Publication entity is completely missing but still the POM file is generated using the default * values including the groupId and version values obtained from the Gradle project object.
                        
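                        The two fixture scripts below drive these assertions. As a hypothetical distillation of the precedence they pin down (the helper and its parameters are illustrative, not Gradle API):

                        // Sketch: the groupId resolution order the tests assert. A groupId set in the
                        // publication's pom block outweighs one set on the publication itself, which
                        // in turn outweighs the project-level (allprojects) group.
                        final class GroupIdPrecedenceSketch {
                            static String effectiveGroupId(String pomGroupId, String publicationGroupId, String projectGroup) {
                                if (pomGroupId != null) {
                                    return pomGroupId;         // "level.3" wins in groupPriorityLevel.gradle
                                }
                                if (publicationGroupId != null) {
                                    return publicationGroupId; // "level.2"
                                }
                                return projectGroup;           // "org.opensearch" in allProjectsGroup.gradle
                            }
                        }
                        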
                        diff --git a/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle b/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle new file mode 100644 index 0000000000000..80638107c86e1 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle @@ -0,0 +1,28 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +allprojects { + group = 'org.opensearch' +} + +publishing { + publications { + pluginZip(MavenPublication) { publication -> + pom { + name = "sample-plugin" + description = "pluginDescription" + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle b/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle new file mode 100644 index 0000000000000..4da02c9f191d8 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle @@ -0,0 +1,30 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +allprojects { + group = 'level.1' +} + +publishing { + publications { + pluginZip(MavenPublication) { publication -> + groupId = "level.2" + pom { + name = "sample-plugin" + description = "pluginDescription" + groupId = "level.3" + } + } + } +} From ec3473708326fcb126f6a4440bbda93f4a1a6c14 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 19 Oct 2022 09:10:24 -0500 Subject: [PATCH 39/39] [Remove] Legacy Version support from Snapshot/Restore Service (#4728) Removes all stale snapshot / restore code for legacy versions prior to OpenSearch 2.0. This includes handling null ShardGenerations, partial concurrency, null index generations, etc., which are no longer supported in snapshot versions after legacy 7.5.
                        
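                        The serialization diffs that follow all collapse one pattern: writers and readers no longer branch on the peer's wire version. A minimal before/after sketch using DeleteSnapshotRequest's fields (the "before" branch is reconstructed from the code this patch deletes):

                        import java.io.IOException;
                        import org.opensearch.common.io.stream.StreamOutput;

                        // Sketch of the BWC gate being removed: every supported peer now understands
                        // the multi-snapshot wire format, so the version check and the legacy
                        // single-snapshot encoding can go away.
                        class DeleteSnapshotRequestWriteSketch {
                            String repository;
                            String[] snapshots;

                            void writeTo(StreamOutput out) throws IOException {
                                out.writeString(repository);
                                // before: if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) {
                                //             out.writeStringArray(snapshots);
                                //         } else {
                                //             out.writeString(snapshots[0]); // rejected multi-snapshot deletes
                                //         }
                                out.writeStringArray(snapshots);
                            }
                        }
                        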
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 2 + .../s3/S3BlobStoreRepositoryTests.java | 64 ----- .../repositories/s3/S3Repository.java | 85 ------- .../MultiVersionRepositoryAccessIT.java | 22 +- .../opensearch/snapshots/CloneSnapshotIT.java | 20 +- .../snapshots/ConcurrentSnapshotsIT.java | 38 --- .../CorruptedBlobStoreRepositoryIT.java | 191 -------------- .../delete/DeleteSnapshotRequest.java | 18 +- .../cluster/SnapshotDeletionsInProgress.java | 40 +-- .../cluster/SnapshotsInProgress.java | 59 +---- .../IndexMetaDataGenerations.java | 11 +- .../repositories/RepositoryData.java | 53 ++-- .../repositories/ShardGenerations.java | 23 +- .../blobstore/BlobStoreRepository.java | 238 ++++++------------ .../opensearch/snapshots/SnapshotInfo.java | 13 +- .../snapshots/SnapshotShardsService.java | 6 - .../snapshots/SnapshotsService.java | 232 +++-------------- .../repositories/RepositoryDataTests.java | 8 +- .../blobstore/BlobStoreTestUtil.java | 2 +- .../AbstractSnapshotIntegTestCase.java | 7 +- 20 files changed, 160 insertions(+), 972 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c4c7c80e5410..d294eb68ee7c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) +- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) + ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index b35d4080a413a..3c3f2887469b3 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -36,8 +36,6 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import fixture.s3.S3HttpHandler; -import org.opensearch.action.ActionRunnable; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; @@ -45,29 +43,21 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.TimeValue; import 
org.opensearch.common.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -75,8 +65,6 @@ import java.util.Map; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.startsWith; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") @@ -84,8 +72,6 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class S3BlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { - private static final TimeValue TEST_COOLDOWN_PERIOD = TimeValue.timeValueSeconds(10L); - private String region; private String signerOverride; @@ -158,56 +144,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - public void testEnforcedCooldownPeriod() throws IOException { - final String repoName = createRepository( - randomName(), - Settings.builder().put(repositorySettings()).put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build() - ); - - final SnapshotId fakeOldSnapshot = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, "snapshot-old") - .setWaitForCompletion(true) - .setIndices() - .get() - .getSnapshotInfo() - .snapshotId(); - final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); - final RepositoryData repositoryData = getRepositoryData(repository); - final RepositoryData modifiedRepositoryData = repositoryData.withVersions( - Collections.singletonMap(fakeOldSnapshot, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion()) - ); - final BytesReference serialized = BytesReference.bytes( - modifiedRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT) - ); - PlainActionFuture.get(f -> repository.threadPool().generic().execute(ActionRunnable.run(f, () -> { - try (InputStream stream = serialized.streamInput()) { - repository.blobStore() - .blobContainer(repository.basePath()) - .writeBlobAtomic( - BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), - stream, - serialized.length(), - true - ); - } - }))); - - final String newSnapshotName = "snapshot-new"; - final long beforeThrottledSnapshot = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareCreateSnapshot(repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledSnapshot, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); 
- - final long beforeThrottledDelete = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareDeleteSnapshot(repoName, newSnapshotName).get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledDelete, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); - - final long beforeFastDelete = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareDeleteSnapshot(repoName, fakeOldSnapshot.getName()).get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos())); - } - /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index c8377949a6842..f80e5743edbc0 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -35,10 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionRunnable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -52,7 +50,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.monitor.jvm.JvmInfo; @@ -62,13 +59,10 @@ import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ThreadPool; import java.util.Collection; import java.util.Map; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -182,24 +176,6 @@ class S3Repository extends MeteredBlobStoreRepository { static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - /** - * Artificial delay to introduce after a snapshot finalization or delete has finished so long as the repository is still using the - * backwards compatible snapshot format from before - * {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} ({@link LegacyESVersion#V_7_6_0}). - * This delay is necessary so that the eventually consistent nature of AWS S3 does not randomly result in repository corruption when - * doing repository operations in rapid succession on a repository in the old metadata format. - * This setting should not be adjusted in production when working with an AWS S3 backed repository. Doing so risks the repository - * becoming silently corrupted. 
To get rid of this waiting period, either create a new S3 repository or remove all snapshots older than - * {@link LegacyESVersion#V_7_6_0} from the repository which will trigger an upgrade of the repository metadata to the new - * format and disable the cooldown period. - */ - static final Setting COOLDOWN_PERIOD = Setting.timeSetting( - "cooldown_period", - new TimeValue(3, TimeUnit.MINUTES), - new TimeValue(0, TimeUnit.MILLISECONDS), - Setting.Property.Dynamic - ); - /** * Specifies the path within bucket to repository data. Defaults to root directory. */ @@ -223,12 +199,6 @@ class S3Repository extends MeteredBlobStoreRepository { private final RepositoryMetadata repositoryMetadata; - /** - * Time period to delay repository operations by after finalizing or deleting a snapshot. - * See {@link #COOLDOWN_PERIOD} for details. - */ - private final TimeValue coolDown; - /** * Constructs an s3 backed repository */ @@ -296,8 +266,6 @@ class S3Repository extends MeteredBlobStoreRepository { ); } - coolDown = COOLDOWN_PERIOD.get(metadata.settings()); - logger.debug( "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, @@ -334,9 +302,6 @@ public void finalizeSnapshot( Function stateTransformer, ActionListener listener ) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); - } super.finalizeSnapshot( shardGenerations, repositoryStateId, @@ -355,59 +320,9 @@ public void deleteSnapshots( Version repositoryMetaVersion, ActionListener listener ) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); - } super.deleteSnapshots(snapshotIds, repositoryStateId, repositoryMetaVersion, listener); } - /** - * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. - * See {@link #COOLDOWN_PERIOD} for details. - */ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - }); - return new ActionListener() { - @Override - public void onResponse(T response) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule( - ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), - coolDown, - ThreadPool.Names.SNAPSHOT - ) - ); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, ThreadPool.Names.SNAPSHOT) - ); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - - private void logCooldownInfo() { - logger.info( - "Sleeping for [{}] after modifying repository [{}] because it contains snapshots older than version [{}]" - + " and therefore is using a backwards compatible metadata format that requires this cooldown period to avoid " - + "repository corruption. 
To get rid of this message and move to the new repository metadata format, either remove " - + "all snapshots older than version [{}] from the repository or create a new repository at an empty location.", - coolDown, - metadata.name(), - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION - ); - } - @Override protected S3BlobStore createBlobStore() { return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, repositoryMetadata); diff --git a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java index c0b90626d7bad..49efce0516434 100644 --- a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; @@ -42,20 +41,17 @@ import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; -import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.DeprecationHandler; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.test.rest.OpenSearchRestTestCase; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -231,20 +227,10 @@ public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); } } else { - if (SnapshotsService.useIndexGenerations(minimumNodeVersion()) == false) { - assertThat(TEST_STEP, is(TestStep.STEP3_OLD_CLUSTER)); - final List> expectedExceptions = - Arrays.asList(ResponseException.class, OpenSearchStatusException.class); - expectThrowsAnyOf(expectedExceptions, () -> listSnapshots(repoName)); - expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-1")); - expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-2")); - expectThrowsAnyOf(expectedExceptions, () -> createSnapshot(client, repoName, "snapshot-impossible", index)); - } else { - assertThat(listSnapshots(repoName), hasSize(2)); - if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { - ensureSnapshotRestoreWorks(repoName, "snapshot-1", shards); - ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); - } + assertThat(listSnapshots(repoName), hasSize(2)); + if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { + ensureSnapshotRestoreWorks(repoName, "snapshot-1", shards); + ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); } } } finally { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 7b95bf93f8bf4..f659d827448a0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -78,14 +78,6 @@ public void testShardClone() throws Exception { final Path repoPath = randomRepoPath(); createRepository(repoName, "fs", repoPath); - final boolean useBwCFormat = randomBoolean(); - if (useBwCFormat) { - initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - // Re-create repo to clear repository data cache - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); - createRepository(repoName, "fs", repoPath); - } - final String indexName = "test-index"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String sourceSnapshot = "source-snapshot"; @@ -101,21 +93,11 @@ public void testShardClone() throws Exception { final SnapshotId targetSnapshotId = new SnapshotId("target-snapshot", UUIDs.randomBase64UUID(random())); - final String currentShardGen; - if (useBwCFormat) { - currentShardGen = null; - } else { - currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); - } + final String currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); final String newShardGeneration = PlainActionFuture.get( f -> repository.cloneShardSnapshot(sourceSnapshotInfo.snapshotId(), targetSnapshotId, repositoryShardId, currentShardGen, f) ); - if (useBwCFormat) { - final long gen = Long.parseLong(newShardGeneration); - assertEquals(gen, 1L); // Initial snapshot brought it to 0, clone increments it to 1 - } - final BlobStoreIndexShardSnapshot targetShardSnapshot = readShardSnapshot(repository, repositoryShardId, targetSnapshotId); final BlobStoreIndexShardSnapshot sourceShardSnapshot = readShardSnapshot( repository, diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index b6d482cad8860..f483ed7fe6c5d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -56,7 +56,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -1297,43 +1296,6 @@ public void testConcurrentOperationsLimit() throws Exception { } } - public void testConcurrentSnapshotWorksWithOldVersionRepo() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String dataNode = internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository( - repoName, - "mock", - Settings.builder().put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false).put("location", repoPath) - ); - initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - createIndexWithContent("index-slow"); - - final ActionFuture createSlowFuture = startFullSnapshotBlockedOnDataNode( - "slow-snapshot", - repoName, - dataNode - ); - - final 
String dataNode2 = internalCluster().startDataOnlyNode(); - ensureStableCluster(3); - final String indexFast = "index-fast"; - createIndexWithContent(indexFast, dataNode2, dataNode); - - final ActionFuture createFastSnapshot = startFullSnapshot(repoName, "fast-snapshot"); - - assertThat(createSlowFuture.isDone(), is(false)); - unblockNode(repoName, dataNode); - - assertSuccessful(createFastSnapshot); - assertSuccessful(createSlowFuture); - - final RepositoryData repositoryData = getRepositoryData(repoName); - assertThat(repositoryData.shardGenerations(), is(ShardGenerations.EMPTY)); - } - public void testQueuedDeleteAfterFinalizationFailure() throws Exception { final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); final String repoName = "test-repo"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index b806ee3e55a94..a07b245db2b21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -32,48 +32,33 @@ package org.opensearch.snapshots; -import org.opensearch.Version; -import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.repositories.IndexId; -import org.opensearch.repositories.IndexMetaDataGenerations; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.threadpool.ThreadPool; -import java.io.IOException; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFileExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -310,101 +295,6 @@ public void testFindDanglingLatestGeneration() throws Exception { ); } - public 
void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { - Path repo = randomRepoPath(); - final String repoName = "test-repo"; - createRepository( - repoName, - "fs", - Settings.builder() - .put("location", repo) - .put("compress", false) - // Don't cache repository data because the test manually modifies the repository data - .put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ); - - final String snapshotPrefix = "test-snap-"; - final int snapshots = randomIntBetween(1, 2); - logger.info("--> creating [{}] snapshots", snapshots); - for (int i = 0; i < snapshots; ++i) { - // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard - // generations (the existence of which would short-circuit checks for the repo containing old version snapshots) - CreateSnapshotResponse createSnapshotResponse = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, snapshotPrefix + i) - .setIndices() - .setWaitForCompletion(true) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - } - final RepositoryData repositoryData = getRepositoryData(repoName); - - final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds()); - logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt); - Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID()))); - - logger.info("--> strip version information from index-N blob"); - final RepositoryData withoutVersions = new RepositoryData( - repositoryData.getGenId(), - repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())), - repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData::getSnapshotState)), - Collections.emptyMap(), - Collections.emptyMap(), - ShardGenerations.EMPTY, - IndexMetaDataGenerations.EMPTY - ); - - Files.write( - repo.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + withoutVersions.getGenId()), - BytesReference.toBytes( - BytesReference.bytes(withoutVersions.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)) - ), - StandardOpenOption.TRUNCATE_EXISTING - ); - - logger.info("--> verify that repo is assumed in old metadata format"); - final SnapshotsService snapshotsService = internalCluster().getCurrentClusterManagerNodeInstance(SnapshotsService.class); - final ThreadPool threadPool = internalCluster().getCurrentClusterManagerNodeInstance(ThreadPool.class); - assertThat( - PlainActionFuture.get( - f -> threadPool.generic() - .execute( - ActionRunnable.supply( - f, - () -> snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null) - ) - ) - ), - is(SnapshotsService.OLD_SNAPSHOT_FORMAT) - ); - - logger.info("--> verify that snapshot with missing root level metadata can be deleted"); - assertAcked(startDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get()); - - logger.info("--> verify that repository is assumed in new metadata format after removing corrupted snapshot"); - assertThat( - PlainActionFuture.get( - f -> threadPool.generic() - .execute( - ActionRunnable.supply( - f, - () -> 
snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null) - ) - ) - ), - is(Version.CURRENT) - ); - final RepositoryData finalRepositoryData = getRepositoryData(repoName); - for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) { - assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT)); - } - } - public void testMountCorruptedRepositoryData() throws Exception { disableRepoConsistencyCheck("This test intentionally corrupts the repository contents"); Client client = client(); @@ -453,87 +343,6 @@ public void testMountCorruptedRepositoryData() throws Exception { expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo)); } - public void testHandleSnapshotErrorWithBwCFormat() throws IOException, ExecutionException, InterruptedException { - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - final String indexName = "test-index"; - createIndex(indexName); - - createFullSnapshot(repoName, "snapshot-1"); - - // In the old metadata version the shard level metadata could be moved to the next generation for all sorts of reasons, this should - // not break subsequent repository operations - logger.info("--> move shard level metadata to new generation"); - final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); - final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); - assertFileExists(initialShardMetaPath); - Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1")); - - startDeleteSnapshot(repoName, oldVersionSnapshot).get(); - - createFullSnapshot(repoName, "snapshot-2"); - } - - public void testRepairBrokenShardGenerations() throws Exception { - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - final String indexName = "test-index"; - createIndex(indexName); - - createFullSnapshot(repoName, "snapshot-1"); - - startDeleteSnapshot(repoName, oldVersionSnapshot).get(); - - logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation"); - final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); - final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); - assertFileExists(initialShardMetaPath); - Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + randomIntBetween(1, 1000))); - - final RepositoryData repositoryData1 = getRepositoryData(repoName); - final Map snapshotIds = repositoryData1.getSnapshotIds() - .stream() - 
.collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())); - final RepositoryData brokenRepoData = new RepositoryData( - repositoryData1.getGenId(), - snapshotIds, - snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData1::getSnapshotState)), - snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData1::getVersion)), - repositoryData1.getIndices().values().stream().collect(Collectors.toMap(Function.identity(), repositoryData1::getSnapshots)), - ShardGenerations.builder().putAll(repositoryData1.shardGenerations()).put(indexId, 0, "0").build(), - repositoryData1.indexMetaDataGenerations() - ); - Files.write( - repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData1.getGenId()), - BytesReference.toBytes( - BytesReference.bytes(brokenRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)) - ), - StandardOpenOption.TRUNCATE_EXISTING - ); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - createFullSnapshot(repoName, "snapshot-2"); - } - /** * Tests that a shard snapshot with a corrupted shard index file can still be used for restore and incremental snapshots. */ diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index 61c4fdb9d5c14..832b37050ffe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -36,7 +36,6 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; @@ -84,27 +83,14 @@ public DeleteSnapshotRequest(String repository) { public DeleteSnapshotRequest(StreamInput in) throws IOException { super(in); repository = in.readString(); - if (in.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - snapshots = in.readStringArray(); - } else { - snapshots = new String[] { in.readString() }; - } + snapshots = in.readStringArray(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(repository); - if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - out.writeStringArray(snapshots); - } else { - if (snapshots.length != 1) { - throw new IllegalArgumentException( - "Can't write snapshot delete with more than one snapshot to version [" + out.getVersion() + "]" - ); - } - out.writeString(snapshots[0]); - } + out.writeStringArray(snapshots); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java index 0f95890e56ec2..e4ce0a8f99e02 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java @@ -34,7 +34,6 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import 
org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -42,9 +41,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.repositories.RepositoryOperation; -import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; import java.util.ArrayList; @@ -245,23 +242,12 @@ private Entry(List snapshots, String repoName, long startTime, long } public Entry(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - this.repoName = in.readString(); - this.snapshots = in.readList(SnapshotId::new); - } else { - final Snapshot snapshot = new Snapshot(in); - this.snapshots = Collections.singletonList(snapshot.getSnapshotId()); - this.repoName = snapshot.getRepository(); - } + this.repoName = in.readString(); + this.snapshots = in.readList(SnapshotId::new); this.startTime = in.readVLong(); this.repositoryStateId = in.readLong(); - if (in.getVersion().onOrAfter(SnapshotsService.FULL_CONCURRENCY_VERSION)) { - this.state = State.readFrom(in); - this.uuid = in.readString(); - } else { - this.state = State.STARTED; - this.uuid = IndexMetadata.INDEX_UUID_NA_VALUE; - } + this.state = State.readFrom(in); + this.uuid = in.readString(); } public Entry started() { @@ -343,22 +329,12 @@ public int hashCode() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - out.writeString(repoName); - out.writeCollection(snapshots); - } else { - assert snapshots.size() == 1 : "Only single deletion allowed in mixed version cluster containing [" - + out.getVersion() - + "] but saw " - + snapshots; - new Snapshot(repoName, snapshots.get(0)).writeTo(out); - } + out.writeString(repoName); + out.writeCollection(snapshots); out.writeVLong(startTime); out.writeLong(repositoryStateId); - if (out.getVersion().onOrAfter(SnapshotsService.FULL_CONCURRENCY_VERSION)) { - state.writeTo(out); - out.writeString(uuid); - } + state.writeTo(out); + out.writeString(uuid); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 4cbf0cfe70adb..f77b3e3b2a904 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -35,7 +35,6 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.Nullable; @@ -54,7 +53,6 @@ import org.opensearch.snapshots.InFlightShardSnapshotStates; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; import java.util.Collections; @@ -66,8 +64,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.snapshots.SnapshotInfo.DATA_STREAMS_IN_SNAPSHOT; - /** * Meta data about snapshots that are currently executing * @@ -77,8 +73,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement public static final SnapshotsInProgress EMPTY = new 
SnapshotsInProgress(Collections.emptyList()); - private static final Version VERSION_IN_SNAPSHOT_VERSION = LegacyESVersion.V_7_7_0; - public static final String TYPE = "snapshots"; public static final String ABORTED_FAILURE_TEXT = "Snapshot was aborted by deletion"; @@ -296,28 +290,10 @@ private Entry(StreamInput in) throws IOException { repositoryStateId = in.readLong(); failure = in.readOptionalString(); userMetadata = in.readMap(); - if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { - version = Version.readVersion(in); - } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { - // If an older cluster-manager informs us that shard generations are supported - // we use the minimum shard generation compatible version. - // If shard generations are not supported yet we use a placeholder for a version that does not use shard generations. - version = in.readBoolean() ? SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION : SnapshotsService.OLD_SNAPSHOT_FORMAT; - } else { - version = SnapshotsService.OLD_SNAPSHOT_FORMAT; - } - if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - dataStreams = in.readStringList(); - } else { - dataStreams = Collections.emptyList(); - } - if (in.getVersion().onOrAfter(SnapshotsService.CLONE_SNAPSHOT_VERSION)) { - source = in.readOptionalWriteable(SnapshotId::new); - clones = in.readImmutableMap(RepositoryShardId::new, ShardSnapshotStatus::readFrom); - } else { - source = null; - clones = ImmutableOpenMap.of(); - } + version = Version.readVersion(in); + dataStreams = in.readStringList(); + source = in.readOptionalWriteable(SnapshotId::new); + clones = in.readImmutableMap(RepositoryShardId::new, ShardSnapshotStatus::readFrom); } private static boolean assertShardsConsistent( @@ -732,18 +708,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(repositoryStateId); out.writeOptionalString(failure); out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { - Version.writeVersion(version, out); - } else if (out.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { - out.writeBoolean(SnapshotsService.useShardGenerations(version)); - } - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } - if (out.getVersion().onOrAfter(SnapshotsService.CLONE_SNAPSHOT_VERSION)) { - out.writeOptionalWriteable(source); - out.writeMap(clones); - } + Version.writeVersion(version, out); + out.writeStringCollection(dataStreams); + out.writeOptionalWriteable(source); + out.writeMap(clones); } @Override @@ -840,12 +808,7 @@ private boolean assertConsistent() { public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { String nodeId = in.readOptionalString(); final ShardState state = ShardState.fromValue(in.readByte()); - final String generation; - if (SnapshotsService.useShardGenerations(in.getVersion())) { - generation = in.readOptionalString(); - } else { - generation = null; - } + final String generation = in.readOptionalString(); final String reason = in.readOptionalString(); if (state == ShardState.QUEUED) { return UNASSIGNED_QUEUED; @@ -884,9 +847,7 @@ public boolean isActive() { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(nodeId); out.writeByte(state.value); - if (SnapshotsService.useShardGenerations(out.getVersion())) { - out.writeOptionalString(generation); - } + out.writeOptionalString(generation); out.writeOptionalString(reason); } diff --git 
a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java index afabf4ebfdb58..e8f96bb313dd1 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java @@ -92,10 +92,7 @@ public String getIndexMetaBlobId(String metaIdentifier) { } /** - * Get the blob id by {@link SnapshotId} and {@link IndexId} and fall back to the value of {@link SnapshotId#getUUID()} if none is - * known to enable backwards compatibility with versions older than - * {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} which used the snapshot uuid as index metadata - * blob uuid. + * Get the blob id by {@link SnapshotId} and {@link IndexId}. * * @param snapshotId Snapshot Id * @param indexId Index Id @@ -103,11 +100,7 @@ public String getIndexMetaBlobId(String metaIdentifier) { */ public String indexMetaBlobId(SnapshotId snapshotId, IndexId indexId) { final String identifier = lookup.getOrDefault(snapshotId, Collections.emptyMap()).get(indexId); - if (identifier == null) { - return snapshotId.getUUID(); - } else { - return identifiers.get(identifier); - } + return identifiers.get(identifier); } /** diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryData.java b/server/src/main/java/org/opensearch/repositories/RepositoryData.java index e8132801e4238..524a978bcf1d1 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryData.java @@ -42,7 +42,6 @@ import org.opensearch.common.xcontent.XContentParserUtils; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; import java.util.ArrayList; @@ -238,7 +237,6 @@ public SnapshotState getSnapshotState(final SnapshotId snapshotId) { /** * Returns the {@link Version} for the given snapshot or {@code null} if unknown. 
*/ - @Nullable public Version getVersion(SnapshotId snapshotId) { return snapshotVersions.get(snapshotId.getUUID()); } @@ -551,8 +549,6 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final builder.startObject(); // write the snapshots list builder.startArray(SNAPSHOTS); - final boolean shouldWriteIndexGens = SnapshotsService.useIndexGenerations(repoMetaVersion); - final boolean shouldWriteShardGens = SnapshotsService.useShardGenerations(repoMetaVersion); for (final SnapshotId snapshot : getSnapshotIds()) { builder.startObject(); builder.field(NAME, snapshot.getName()); @@ -562,14 +558,12 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final if (state != null) { builder.field(STATE, state.value()); } - if (shouldWriteIndexGens) { - builder.startObject(INDEX_METADATA_LOOKUP); - for (Map.Entry entry : indexMetaDataGenerations.lookup.getOrDefault(snapshot, Collections.emptyMap()) - .entrySet()) { - builder.field(entry.getKey().getId(), entry.getValue()); - } - builder.endObject(); + builder.startObject(INDEX_METADATA_LOOKUP); + for (Map.Entry entry : indexMetaDataGenerations.lookup.getOrDefault(snapshot, Collections.emptyMap()) + .entrySet()) { + builder.field(entry.getKey().getId(), entry.getValue()); } + builder.endObject(); final Version version = snapshotVersions.get(snapshotUUID); if (version != null) { builder.field(VERSION, version.toString()); @@ -589,23 +583,15 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final builder.value(snapshotId.getUUID()); } builder.endArray(); - if (shouldWriteShardGens) { - builder.startArray(SHARD_GENERATIONS); - for (String gen : shardGenerations.getGens(indexId)) { - builder.value(gen); - } - builder.endArray(); + builder.startArray(SHARD_GENERATIONS); + for (String gen : shardGenerations.getGens(indexId)) { + builder.value(gen); } + builder.endArray(); builder.endObject(); } builder.endObject(); - if (shouldWriteIndexGens) { - builder.field(MIN_VERSION, SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION.toString()); - builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers); - } else if (shouldWriteShardGens) { - // Add min version field to make it impossible for older OpenSearch versions to deserialize this object - builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString()); - } + builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers); builder.endObject(); return builder; } @@ -616,12 +602,8 @@ public IndexMetaDataGenerations indexMetaDataGenerations() { /** * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata. - * - * @param fixBrokenShardGens set to {@code true} to filter out broken shard generations read from the {@code parser} via - * {@link ShardGenerations#fixShardGeneration}. Used to disable fixing broken generations when reading - * from cached bytes that we trust to not contain broken generations. 
*/ - public static RepositoryData snapshotsFromXContent(XContentParser parser, long genId, boolean fixBrokenShardGens) throws IOException { + public static RepositoryData snapshotsFromXContent(XContentParser parser, long genId) throws IOException { XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); final Map snapshots = new HashMap<>(); @@ -639,16 +621,16 @@ public static RepositoryData snapshotsFromXContent(XContentParser parser, long g parseSnapshots(parser, snapshots, snapshotStates, snapshotVersions, indexMetaLookup); break; case INDICES: - parseIndices(parser, fixBrokenShardGens, snapshots, indexSnapshots, indexLookup, shardGenerations); + parseIndices(parser, snapshots, indexSnapshots, indexLookup, shardGenerations); break; case INDEX_METADATA_IDENTIFIERS: XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); indexMetaIdentifiers = parser.mapStrings(); break; case MIN_VERSION: - XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.nextToken(), parser); - final Version version = Version.fromString(parser.text()); - assert SnapshotsService.useShardGenerations(version); + // ignore min_version + // todo: remove in next version + parser.nextToken(); break; default: XContentParserUtils.throwUnknownField(field, parser.getTokenLocation()); @@ -763,7 +745,6 @@ private static void parseSnapshots( * {@code shardGenerations}. * * @param parser x-content parser - * @param fixBrokenShardGens whether or not to fix broken shard generation (see {@link #snapshotsFromXContent} for details) * @param snapshots map of snapshot uuid to {@link SnapshotId} that was populated by {@link #parseSnapshots} * @param indexSnapshots map of {@link IndexId} to list of {@link SnapshotId} that contain the given index * @param indexLookup map of index uuid (as returned by {@link IndexId#getId}) to {@link IndexId} @@ -771,7 +752,6 @@ private static void parseSnapshots( */ private static void parseIndices( XContentParser parser, - boolean fixBrokenShardGens, Map snapshots, Map> indexSnapshots, Map indexLookup, @@ -835,9 +815,6 @@ private static void parseIndices( indexLookup.put(indexId.getId(), indexId); for (int i = 0; i < gens.size(); i++) { String parsedGen = gens.get(i); - if (fixBrokenShardGens) { - parsedGen = ShardGenerations.fixShardGeneration(parsedGen); - } if (parsedGen != null) { shardGenerations.put(indexId, i, parsedGen); } diff --git a/server/src/main/java/org/opensearch/repositories/ShardGenerations.java b/server/src/main/java/org/opensearch/repositories/ShardGenerations.java index 27eac4dfdd8c1..d918eb7e1e476 100644 --- a/server/src/main/java/org/opensearch/repositories/ShardGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/ShardGenerations.java @@ -33,7 +33,6 @@ package org.opensearch.repositories; import org.opensearch.common.Nullable; -import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import java.util.Arrays; import java.util.Collection; @@ -44,7 +43,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.regex.Pattern; import java.util.stream.Collectors; /** @@ -74,24 +72,6 @@ private ShardGenerations(Map> shardGenerations) { this.shardGenerations = shardGenerations; } - private static final Pattern IS_NUMBER = Pattern.compile("^\\d+$"); - - /** - * Filters out unreliable numeric shard generations read from {@link RepositoryData} or {@link IndexShardSnapshotStatus}, returning - * {@code null} in 
their place. - * @see Issue #57988 - * - * @param shardGeneration shard generation to fix - * @return given shard generation or {@code null} if it was filtered out or {@code null} was passed - */ - @Nullable - public static String fixShardGeneration(@Nullable String shardGeneration) { - if (shardGeneration == null) { - return null; - } - return IS_NUMBER.matcher(shardGeneration).matches() ? null : shardGeneration; - } - /** * Returns the total number of shards tracked by this instance. */ @@ -145,8 +125,7 @@ public Map> obsoleteShardGenerations(ShardGenerati *
     * <ul>
     *     <li>{@link #DELETED_SHARD_GEN} a deleted shard that isn't referenced by any snapshot in the repository any longer</li>
     *     <li>{@link #NEW_SHARD_GEN} a new shard that we know doesn't hold any valid data yet in the repository</li>
-    *     <li>{@code null} unknown state. The shard either does not exist at all or it was created by a node older than
-    *     {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION}. If a caller expects a shard to exist in the
+    *     <li>{@code null} unknown state. The shard does not exist at all. If a caller expects a shard to exist in the
     *     repository but sees a {@code null} return, it should try to recover the generation by falling back to listing the contents
     *     of the respective shard directory.</li>
     * </ul>
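
The {@code null} fallback described in the last list item is the one subtle contract here, so a concrete illustration may help. The sketch below is illustrative only and not part of the patch: it assumes the caller has already listed the shard directory's blob names into a Set, and the class and method names are invented for the example.

// A minimal, hypothetical sketch of recovering a shard generation by listing the
// shard directory when no generation is tracked. ShardGenerationFallback and
// resolveGeneration are assumptions for illustration; BlobStoreRepository's real
// recovery logic differs in detail.
import java.util.Set;

final class ShardGenerationFallback {

    private static final String SNAPSHOT_INDEX_PREFIX = "index-";

    // Returns the tracked generation if one exists; otherwise scans the listed blob
    // names for the highest numeric index-N suffix and returns it as the generation.
    static String resolveGeneration(String trackedGeneration, Set<String> shardBlobNames) {
        if (trackedGeneration != null) {
            return trackedGeneration; // RepositoryData tracked a generation for this shard
        }
        long latest = -1L;
        for (String blobName : shardBlobNames) {
            if (blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
                try {
                    latest = Math.max(latest, Long.parseLong(blobName.substring(SNAPSHOT_INDEX_PREFIX.length())));
                } catch (NumberFormatException e) {
                    // skip non-numeric names, e.g. UUID-suffixed generation blobs
                }
            }
        }
        return latest < 0 ? null : Long.toString(latest);
    }
}

A real caller would obtain the listing from the shard's blob container rather than a plain Set, and would prefer any generation already tracked in RepositoryData.
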
    diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index bf06191bdc8d3..1000fe4078b48 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -818,66 +818,46 @@ private void doDeleteShardSnapshots( Version repoMetaVersion, ActionListener listener ) { - - if (SnapshotsService.useShardGenerations(repoMetaVersion)) { - // First write the new shard state metadata (with the removed snapshot) and compute deletion targets - final StepListener> writeShardMetaDataAndComputeDeletesStep = new StepListener<>(); - writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, true, writeShardMetaDataAndComputeDeletesStep); - // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: - // 1. Remove the snapshots from the list of existing snapshots - // 2. Update the index shard generations of all updated shard folders - // - // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created - // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only - // written if all shard paths have been successfully updated. - final StepListener writeUpdatedRepoDataStep = new StepListener<>(); - writeShardMetaDataAndComputeDeletesStep.whenComplete(deleteResults -> { - final ShardGenerations.Builder builder = ShardGenerations.builder(); - for (ShardSnapshotMetaDeleteResult newGen : deleteResults) { - builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); - } - final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, builder.build()); - writeIndexGen( - updatedRepoData, - repositoryStateId, - repoMetaVersion, - Function.identity(), - ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) - ); - }, listener::onFailure); - // Once we have updated the repository, run the clean-ups - writeUpdatedRepoDataStep.whenComplete(updatedRepoData -> { - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - final ActionListener afterCleanupsListener = new GroupedActionListener<>( - ActionListener.wrap(() -> listener.onResponse(updatedRepoData)), - 2 - ); - cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener); - asyncCleanupUnlinkedShardLevelBlobs( - repositoryData, - snapshotIds, - writeShardMetaDataAndComputeDeletesStep.result(), - afterCleanupsListener - ); - }, listener::onFailure); - } else { - // Write the new repository data first (with the removed snapshot), using no shard generations - final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY); - writeIndexGen(updatedRepoData, repositoryStateId, repoMetaVersion, Function.identity(), ActionListener.wrap(newRepoData -> { - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - final ActionListener afterCleanupsListener = new GroupedActionListener<>( - ActionListener.wrap(() -> listener.onResponse(newRepoData)), - 2 - ); - cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, newRepoData, afterCleanupsListener); - final StepListener> writeMetaAndComputeDeletesStep = new StepListener<>(); - 
writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, false, writeMetaAndComputeDeletesStep); - writeMetaAndComputeDeletesStep.whenComplete( - deleteResults -> asyncCleanupUnlinkedShardLevelBlobs(repositoryData, snapshotIds, deleteResults, afterCleanupsListener), - afterCleanupsListener::onFailure - ); - }, listener::onFailure)); - } + // First write the new shard state metadata (with the removed snapshot) and compute deletion targets + final StepListener> writeShardMetaDataAndComputeDeletesStep = new StepListener<>(); + writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, true, writeShardMetaDataAndComputeDeletesStep); + // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: + // 1. Remove the snapshots from the list of existing snapshots + // 2. Update the index shard generations of all updated shard folders + // + // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created + // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only + // written if all shard paths have been successfully updated. + final StepListener writeUpdatedRepoDataStep = new StepListener<>(); + writeShardMetaDataAndComputeDeletesStep.whenComplete(deleteResults -> { + final ShardGenerations.Builder builder = ShardGenerations.builder(); + for (ShardSnapshotMetaDeleteResult newGen : deleteResults) { + builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); + } + final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, builder.build()); + writeIndexGen( + updatedRepoData, + repositoryStateId, + repoMetaVersion, + Function.identity(), + ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) + ); + }, listener::onFailure); + // Once we have updated the repository, run the clean-ups + writeUpdatedRepoDataStep.whenComplete(updatedRepoData -> { + // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion + final ActionListener afterCleanupsListener = new GroupedActionListener<>( + ActionListener.wrap(() -> listener.onResponse(updatedRepoData)), + 2 + ); + cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener); + asyncCleanupUnlinkedShardLevelBlobs( + repositoryData, + snapshotIds, + writeShardMetaDataAndComputeDeletesStep.result(), + afterCleanupsListener + ); + }, listener::onFailure); } private void cleanupUnlinkedRootAndIndicesBlobs( @@ -1365,31 +1345,19 @@ public void finalizeSnapshot( final Collection indices = shardGenerations.indices(); final SnapshotId snapshotId = snapshotInfo.snapshotId(); // Once we are done writing the updated index-N blob we remove the now unreferenced index-${uuid} blobs in each shard - // directory if all nodes are at least at version SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION - // If there are older version nodes in the cluster, we don't need to run this cleanup as it will have already happened - // when writing the index-${N} to each shard directory. 
- final boolean writeShardGens = SnapshotsService.useShardGenerations(repositoryMetaVersion); + // directory final Consumer onUpdateFailure = e -> listener.onFailure( new SnapshotException(metadata.name(), snapshotId, "failed to update snapshot in repository", e) ); final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - final boolean writeIndexGens = SnapshotsService.useIndexGenerations(repositoryMetaVersion); - final StepListener repoDataListener = new StepListener<>(); getRepositoryData(repoDataListener); repoDataListener.whenComplete(existingRepositoryData -> { - final Map indexMetas; - final Map indexMetaIdentifiers; - if (writeIndexGens) { - indexMetaIdentifiers = ConcurrentCollections.newConcurrentMap(); - indexMetas = ConcurrentCollections.newConcurrentMap(); - } else { - indexMetas = null; - indexMetaIdentifiers = null; - } + final Map indexMetas = ConcurrentCollections.newConcurrentMap(); + final Map indexMetaIdentifiers = ConcurrentCollections.newConcurrentMap(); final ActionListener allMetaListener = new GroupedActionListener<>(ActionListener.wrap(v -> { final RepositoryData updatedRepositoryData = existingRepositoryData.addSnapshot( @@ -1406,9 +1374,7 @@ public void finalizeSnapshot( repositoryMetaVersion, stateTransformer, ActionListener.wrap(newRepoData -> { - if (writeShardGens) { - cleanupOldShardGens(existingRepositoryData, updatedRepositoryData); - } + cleanupOldShardGens(existingRepositoryData, updatedRepositoryData); listener.onResponse(newRepoData); }, onUpdateFailure) ); @@ -1432,24 +1398,15 @@ public void finalizeSnapshot( for (IndexId index : indices) { executor.execute(ActionRunnable.run(allMetaListener, () -> { final IndexMetadata indexMetaData = clusterMetadata.index(index.getName()); - if (writeIndexGens) { - final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData); - String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers); - if (metaUUID == null) { - // We don't yet have this version of the metadata so we write it - metaUUID = UUIDs.base64UUID(); - INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress); - indexMetaIdentifiers.put(identifiers, metaUUID); - } - indexMetas.put(index, identifiers); - } else { - INDEX_METADATA_FORMAT.write( - clusterMetadata.index(index.getName()), - indexContainer(index), - snapshotId.getUUID(), - compress - ); + final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData); + String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers); + if (metaUUID == null) { + // We don't yet have this version of the metadata so we write it + metaUUID = UUIDs.base64UUID(); + INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress); + indexMetaIdentifiers.put(identifiers, metaUUID); } + indexMetas.put(index, identifiers); })); } executor.execute( @@ -1774,8 +1731,7 @@ private RepositoryData repositoryDataFromCachedEntry(Tuple try (InputStream input = CompressorFactory.COMPRESSOR.threadLocalInputStream(cacheEntry.v2().streamInput())) { return RepositoryData.snapshotsFromXContent( XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input), - cacheEntry.v1(), - false + cacheEntry.v1() ); } } @@ -1870,7 +1826,7 @@ private RepositoryData getRepositoryData(long indexGen) { XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, 
LoggingDeprecationHandler.INSTANCE, blob) ) { - return RepositoryData.snapshotsFromXContent(parser, indexGen, true); + return RepositoryData.snapshotsFromXContent(parser, indexGen); } } catch (IOException ioe) { if (bestEffortConsistency) { @@ -2454,7 +2410,6 @@ public void snapshotShard( ); final String indexGeneration; - final boolean writeShardGens = SnapshotsService.useShardGenerations(repositoryMetaVersion); // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones List newSnapshotsList = new ArrayList<>(); newSnapshotsList.add(new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, shardStateIdentifier)); @@ -2463,71 +2418,24 @@ public void snapshotShard( } final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); final Runnable afterWriteSnapBlob; - if (writeShardGens) { - // When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data - // for this shard since the uuid named blob will simply not be referenced in case of error and thus we will never - // reference a generation that has not had all its files fully upload. - indexGeneration = UUIDs.randomBase64UUID(); - try { - INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedBlobStoreIndexShardSnapshots, shardContainer, indexGeneration, compress); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException( - shardId, - "Failed to write shard level snapshot metadata for [" - + snapshotId - + "] to [" - + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) - + "]", - e - ); - } - afterWriteSnapBlob = () -> {}; - } else { - // When not using shard generations we can only write the index-${N} blob after all other work for this shard has - // completed. - // Also, in case of numeric shard generations the data node has to take care of deleting old shard generations. - final long newGen = Long.parseLong(fileListGeneration) + 1; - indexGeneration = Long.toString(newGen); - // Delete all previous index-N blobs - final List blobsToDelete = blobs.stream() - .filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)) - .collect(Collectors.toList()); - assert blobsToDelete.stream() - .mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) - .max() - .orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" - + indexGeneration - + "] when deleting index-N blobs " - + blobsToDelete; - afterWriteSnapBlob = () -> { - try { - writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException( - shardId, - "Failed to finalize snapshot creation [" - + snapshotId - + "] with shard index [" - + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) - + "]", - e - ); - } - try { - deleteFromContainer(shardContainer, blobsToDelete); - } catch (IOException e) { - logger.warn( - () -> new ParameterizedMessage( - "[{}][{}] failed to delete old index-N blobs during finalization", - snapshotId, - shardId - ), - e - ); - } - }; + // When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data + // for this shard since the uuid named blob will simply not be referenced in case of error and thus we will never + // reference a generation that has not had all its files fully upload. 
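+ // In contrast, the numeric index-${N} scheme removed above could only write its
+ // blob after every shard file had been uploaded, and then had to delete the
+ // previous index-N blobs itself in afterWriteSnapBlob.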
+ indexGeneration = UUIDs.randomBase64UUID(); + try { + INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedBlobStoreIndexShardSnapshots, shardContainer, indexGeneration, compress); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException( + shardId, + "Failed to write shard level snapshot metadata for [" + + snapshotId + + "] to [" + + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) + + "]", + e + ); } - + afterWriteSnapBlob = () -> {}; final StepListener> allFilesUploadedListener = new StepListener<>(); allFilesUploadedListener.whenComplete(v -> { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); @@ -2970,9 +2878,7 @@ public BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContaine * the given list of blobs in the shard container. * * @param blobs list of blobs in repository - * @param generation shard generation or {@code null} in case there was no shard generation tracked in the {@link RepositoryData} for - * this shard because its snapshot was created in a version older than - * {@link SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION}. + * @param generation shard generation * @return tuple of BlobStoreIndexShardSnapshots and the last snapshot index generation */ private Tuple buildBlobStoreIndexShardSnapshots( diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 38d9df0e960e0..94ab875091caf 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -31,7 +31,6 @@ package org.opensearch.snapshots; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ShardOperationFailedException; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -69,8 +68,6 @@ */ public final class SnapshotInfo implements Comparable, ToXContent, Writeable { - public static final Version DATA_STREAMS_IN_SNAPSHOT = LegacyESVersion.V_7_9_0; - public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); @@ -401,11 +398,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { version = in.readBoolean() ? 
Version.readVersion(in) : null; includeGlobalState = in.readOptionalBoolean(); userMetadata = in.readMap(); - if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - dataStreams = in.readStringList(); - } else { - dataStreams = Collections.emptyList(); - } + dataStreams = in.readStringList(); } /** @@ -836,9 +829,7 @@ public void writeTo(final StreamOutput out) throws IOException { } out.writeOptionalBoolean(includeGlobalState); out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } + out.writeStringCollection(dataStreams); } private static SnapshotState snapshotState(final String reason, final List shardFailures) { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 22a85558e3b1d..86dbc31245b11 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -67,7 +67,6 @@ import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequestDeduplicator; @@ -276,11 +275,6 @@ private void startNewShards(SnapshotsInProgress.Entry entry, Map() { @Override public void onResponse(String newGeneration) { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index e53c2889f88e6..0123c839e4ad9 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -38,7 +38,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -141,18 +140,6 @@ */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { - public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0; - - public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0; - - public static final Version SHARD_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_6_0; - - public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0; - - public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099); - - public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0; - private static final Logger logger = LogManager.getLogger(SnapshotsService.class); public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status"; @@ -168,9 +155,6 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final Map>>> snapshotCompletionListeners = new ConcurrentHashMap<>(); - // Set of snapshots that are currently being initialized by this node - private final Set initializingSnapshots = Collections.synchronizedSet(new HashSet<>()); - /** * Listeners for snapshot deletion keyed by delete uuid as returned from {@link 
SnapshotDeletionsInProgress.Entry#uuid()} */ @@ -285,18 +269,10 @@ public ClusterState execute(ClusterState currentState) { final List runningSnapshots = snapshots.entries(); ensureSnapshotNameNotRunning(runningSnapshots, repositoryName, snapshotName); validate(repositoryName, snapshotName, currentState); - final boolean concurrentOperationsAllowed = currentState.nodes().getMinNodeVersion().onOrAfter(FULL_CONCURRENCY_VERSION); final SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY ); - if (deletionsInProgress.hasDeletionsInProgress() && concurrentOperationsAllowed == false) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]" - ); - } final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom( RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY @@ -308,13 +284,6 @@ public ClusterState execute(ClusterState currentState) { "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" ); } - // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from - // the - // cluster state anyway in #applyClusterState. - if (concurrentOperationsAllowed == false && runningSnapshots.stream().anyMatch(entry -> entry.state() != State.INIT)) { - throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); - } ensureNoCleanupInProgress(currentState, repositoryName, snapshotName); ensureBelowConcurrencyLimit(repositoryName, snapshotName, snapshots, deletionsInProgress); // Store newSnapshot here to be processed in clusterStateProcessed @@ -339,7 +308,6 @@ public ClusterState execute(ClusterState currentState) { currentState.metadata(), currentState.routingTable(), indexIds, - useShardGenerations(version), repositoryData, repositoryName ); @@ -1817,16 +1785,6 @@ public void deleteSnapshots(final DeleteSnapshotRequest request, final ActionLis @Override public ClusterState execute(ClusterState currentState) throws Exception { - final Version minNodeVersion = currentState.nodes().getMinNodeVersion(); - if (snapshotNames.length > 1 && minNodeVersion.before(MULTI_DELETE_VERSION)) { - throw new IllegalArgumentException( - "Deleting multiple snapshots in a single request is only supported in version [ " - + MULTI_DELETE_VERSION - + "] but cluster contained node of version [" - + currentState.nodes().getMinNodeVersion() - + "]" - ); - } final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final List snapshotEntries = findInProgressSnapshots(snapshots, snapshotNames, repoName); final List snapshotIds = matchingSnapshotIds( @@ -1835,76 +1793,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { snapshotNames, repoName ); - if (snapshotEntries.isEmpty() || minNodeVersion.onOrAfter(SnapshotsService.FULL_CONCURRENCY_VERSION)) { - deleteFromRepoTask = createDeleteStateUpdate(snapshotIds, repoName, repositoryData, Priority.NORMAL, listener); - return deleteFromRepoTask.execute(currentState); - } - assert snapshotEntries.size() == 1 : "Expected just a single running snapshot but saw " + snapshotEntries; - final 
SnapshotsInProgress.Entry snapshotEntry = snapshotEntries.get(0); - runningSnapshot = snapshotEntry.snapshot(); - final ImmutableOpenMap shards; - - final State state = snapshotEntry.state(); - final String failure; - - outstandingDeletes = new ArrayList<>(snapshotIds); - if (state != State.INIT) { - // INIT state snapshots won't ever be physically written to the repository but all other states will end up in the repo - outstandingDeletes.add(runningSnapshot.getSnapshotId()); - } - if (state == State.INIT) { - // snapshot is still initializing, mark it as aborted - shards = snapshotEntry.shards(); - assert shards.isEmpty(); - failure = "Snapshot was aborted during initialization"; - abortedDuringInit = true; - } else if (state == State.STARTED) { - // snapshot is started - mark every non completed shard as aborted - final SnapshotsInProgress.Entry abortedEntry = snapshotEntry.abort(); - shards = abortedEntry.shards(); - failure = abortedEntry.failure(); - } else { - boolean hasUncompletedShards = false; - // Cleanup in case a node gone missing and snapshot wasn't updated for some reason - for (ObjectCursor shardStatus : snapshotEntry.shards().values()) { - // Check if we still have shard running on existing nodes - if (shardStatus.value.state().completed() == false - && shardStatus.value.nodeId() != null - && currentState.nodes().get(shardStatus.value.nodeId()) != null) { - hasUncompletedShards = true; - break; - } - } - if (hasUncompletedShards) { - // snapshot is being finalized - wait for shards to complete finalization process - logger.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes"); - return currentState; - } else { - // no shards to wait for but a node is gone - this is the only case - // where we force to finish the snapshot - logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); - shards = snapshotEntry.shards(); - } - failure = snapshotEntry.failure(); - } - return ClusterState.builder(currentState) - .putCustom( - SnapshotsInProgress.TYPE, - SnapshotsInProgress.of( - snapshots.entries() - .stream() - // remove init state snapshot we found from a previous cluster-manager if there was one - .filter(existing -> abortedDuringInit == false || existing.equals(snapshotEntry) == false) - .map(existing -> { - if (existing.equals(snapshotEntry)) { - return snapshotEntry.fail(shards, State.ABORTED, failure); - } - return existing; - }) - .collect(Collectors.toList()) - ) - ) - .build(); + deleteFromRepoTask = createDeleteStateUpdate(snapshotIds, repoName, repositoryData, Priority.NORMAL, listener); + return deleteFromRepoTask.execute(currentState); } @Override @@ -2053,14 +1943,6 @@ public ClusterState execute(ClusterState currentState) { SnapshotDeletionsInProgress.EMPTY ); final Version minNodeVersion = currentState.nodes().getMinNodeVersion(); - if (minNodeVersion.before(FULL_CONCURRENCY_VERSION)) { - if (deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException( - new Snapshot(repoName, snapshotIds.get(0)), - "cannot delete - another snapshot is currently being deleted in [" + deletionsInProgress + "]" - ); - } - } final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom( RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY @@ -2100,44 +1982,32 @@ public ClusterState execute(ClusterState currentState) { } // Snapshot ids that will have to be physically deleted from the repository final Set 
snapshotIdsRequiringCleanup = new HashSet<>(snapshotIds); - final SnapshotsInProgress updatedSnapshots; - if (minNodeVersion.onOrAfter(FULL_CONCURRENCY_VERSION)) { - updatedSnapshots = SnapshotsInProgress.of(snapshots.entries().stream().map(existing -> { - if (existing.state() == State.STARTED - && snapshotIdsRequiringCleanup.contains(existing.snapshot().getSnapshotId())) { - // snapshot is started - mark every non completed shard as aborted - final SnapshotsInProgress.Entry abortedEntry = existing.abort(); - if (abortedEntry == null) { - // No work has been done for this snapshot yet so we remove it from the cluster state directly - final Snapshot existingNotYetStartedSnapshot = existing.snapshot(); - // Adding the snapshot to #endingSnapshots since we still have to resolve its listeners to not trip - // any leaked listener assertions - if (endingSnapshots.add(existingNotYetStartedSnapshot)) { - completedNoCleanup.add(existingNotYetStartedSnapshot); - } - snapshotIdsRequiringCleanup.remove(existingNotYetStartedSnapshot.getSnapshotId()); - } else if (abortedEntry.state().completed()) { - completedWithCleanup.add(abortedEntry); + final SnapshotsInProgress updatedSnapshots = SnapshotsInProgress.of(snapshots.entries().stream().map(existing -> { + if (existing.state() == State.STARTED && snapshotIdsRequiringCleanup.contains(existing.snapshot().getSnapshotId())) { + // snapshot is started - mark every non completed shard as aborted + final SnapshotsInProgress.Entry abortedEntry = existing.abort(); + if (abortedEntry == null) { + // No work has been done for this snapshot yet so we remove it from the cluster state directly + final Snapshot existingNotYetStartedSnapshot = existing.snapshot(); + // Adding the snapshot to #endingSnapshots since we still have to resolve its listeners to not trip + // any leaked listener assertions + if (endingSnapshots.add(existingNotYetStartedSnapshot)) { + completedNoCleanup.add(existingNotYetStartedSnapshot); } - return abortedEntry; + snapshotIdsRequiringCleanup.remove(existingNotYetStartedSnapshot.getSnapshotId()); + } else if (abortedEntry.state().completed()) { + completedWithCleanup.add(abortedEntry); } - return existing; - }).filter(Objects::nonNull).collect(Collectors.toList())); - if (snapshotIdsRequiringCleanup.isEmpty()) { - // We only saw snapshots that could be removed from the cluster state right away, no need to update the deletions - return updateWithSnapshots(currentState, updatedSnapshots, null); + return abortedEntry; } - } else { - if (snapshots.entries().isEmpty() == false) { - // However other snapshots are running - cannot continue - throw new ConcurrentSnapshotExecutionException( - repoName, - snapshotIds.toString(), - "another snapshot is currently running cannot delete" - ); - } - updatedSnapshots = snapshots; + return existing; + }).filter(Objects::nonNull).collect(Collectors.toList())); + + if (snapshotIdsRequiringCleanup.isEmpty()) { + // We only saw snapshots that could be removed from the cluster state right away, no need to update the deletions + return updateWithSnapshots(currentState, updatedSnapshots, null); } + // add the snapshot deletion to the cluster state final SnapshotDeletionsInProgress.Entry replacedEntry = deletionsInProgress.getEntries() .stream() @@ -2266,41 +2136,11 @@ public Version minCompatibleVersion(Version minNodeVersion, RepositoryData repos .filter(excluded == null ? 
sn -> true : sn -> excluded.contains(sn) == false) .collect(Collectors.toList())) { final Version known = repositoryData.getVersion(snapshotId); - // If we don't have the version cached in the repository data yet we load it from the snapshot info blobs - if (known == null) { - assert repositoryData.shardGenerations().totalShards() == 0 : "Saw shard generations [" - + repositoryData.shardGenerations() - + "] but did not have versions tracked for snapshot [" - + snapshotId - + "]"; - return OLD_SNAPSHOT_FORMAT; - } else { - minCompatVersion = minCompatVersion.before(known) ? minCompatVersion : known; - } + minCompatVersion = minCompatVersion.before(known) ? minCompatVersion : known; } return minCompatVersion; } - /** - * Checks whether the metadata version supports writing {@link ShardGenerations} to the repository. - * - * @param repositoryMetaVersion version to check - * @return true if version supports {@link ShardGenerations} - */ - public static boolean useShardGenerations(Version repositoryMetaVersion) { - return repositoryMetaVersion.onOrAfter(SHARD_GEN_IN_REPO_DATA_VERSION); - } - - /** - * Checks whether the metadata version supports writing {@link ShardGenerations} to the repository. - * - * @param repositoryMetaVersion version to check - * @return true if version supports {@link ShardGenerations} - */ - public static boolean useIndexGenerations(Version repositoryMetaVersion) { - return repositoryMetaVersion.onOrAfter(INDEX_GEN_IN_REPO_DATA_VERSION); - } - /** Deletes snapshot from repository * * @param deleteEntry delete entry in cluster state @@ -2578,7 +2418,6 @@ private SnapshotsInProgress updatedSnapshotsInProgress(ClusterState currentState currentState.metadata(), currentState.routingTable(), entry.indices(), - entry.version().onOrAfter(SHARD_GEN_IN_REPO_DATA_VERSION), repositoryData, repoName ); @@ -2677,7 +2516,6 @@ private static void completeListenersIgnoringException(@Nullable List shards( @@ -2686,7 +2524,6 @@ private static ImmutableOpenMap indices, - boolean useShardGenerations, RepositoryData repositoryData, String repoName ) { @@ -2712,18 +2549,15 @@ private static ImmutableOpenMap RepositoryData.snapshotsFromXContent(xParser, corruptedRepositoryData.getGenId(), randomBoolean()) + () -> RepositoryData.snapshotsFromXContent(xParser, corruptedRepositoryData.getGenId()) ); assertThat( e.getMessage(), @@ -327,7 +327,7 @@ public void testIndexThatReferenceANullSnapshot() throws IOException { try (XContentParser xParser = createParser(builder)) { OpenSearchParseException e = expectThrows( OpenSearchParseException.class, - () -> RepositoryData.snapshotsFromXContent(xParser, randomNonNegativeLong(), randomBoolean()) + () -> RepositoryData.snapshotsFromXContent(xParser, randomNonNegativeLong()) ); assertThat( e.getMessage(), diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index f9bc0d1066d8e..0702866af35e3 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -136,7 +136,7 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob) ) { - repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen, false); + 
repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen); } assertIndexUUIDs(repository, repositoryData); assertSnapshotUUIDs(repository, repositoryData); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index bbac1ef591418..e5eac9af13de1 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -419,8 +419,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version DeprecationHandler.THROW_UNSUPPORTED_OPERATION, Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString()) ), - repositoryData.getGenId(), - randomBoolean() + repositoryData.getGenId() ); Files.write( repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()), @@ -512,7 +511,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map