From ba770b50b6bd1f11888b00cec9fecfc5e55c879a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 12 Jan 2018 14:44:53 -0500 Subject: [PATCH 01/31] Keep one commit whose max_seqno is no_ops_performed If a 6.x node with a 5.x index is promoted to be a primary, it will flush a new index commit to make sure translog operations without seqno will never be replayed (see IndexShard#updateShardState). However, the global checkpoint is still UNASSIGNED and the max_seqno of both commits is NO_OPS_PERFORMED. If the combined deletion policy considers the first commit as a safe commit, we will send the first commit without replaying the translog between these commits to the replica in a peer-recovery. This causes the replica to miss those operations. To prevent this, we should not keep more than one commit whose max_seqno is NO_OPS_PERFORMED. Once we can retain a safe commit, a NO_OPS_PERFORMED commit will be deleted just like other commits. Relates #28038 --- .../index/engine/CombinedDeletionPolicy.java | 10 ++++ .../engine/CombinedDeletionPolicyTests.java | 49 +++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index ffb12f5b84381..018e299a6888b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -126,6 +126,16 @@ private static int indexOfKeptCommits(List commits, long return Math.min(commits.size() - 1, i + 1); } final long maxSeqNoFromCommit = Long.parseLong(commitUserData.get(SequenceNumbers.MAX_SEQ_NO)); + // If a 6.x node with a 5.x index is promoted to be a primary, it will flush a new index commit to + // make sure translog operations without seqno will never be replayed (see IndexShard#updateShardState). + // However, the global checkpoint is still UNASSIGNED and the max_seqno of both commits is NO_OPS_PERFORMED. + // If this policy considers the first commit as a safe commit, we will send the first commit without replaying + // the translog between these commits to the replica in a peer-recovery. This causes the replica to miss those operations. + // To prevent this, we should not keep more than one commit whose max_seqno is NO_OPS_PERFORMED. + // Once we can retain a safe commit, a NO_OPS_PERFORMED commit will be deleted just like other commits.
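To make the rule above concrete before the check that follows, here is a minimal, self-contained sketch of the selection loop; it is an illustration only, not the real method: it omits the translog-UUID validation, and the class and parameter names are invented. The constants mirror SequenceNumbers.NO_OPS_PERFORMED (-1) and SequenceNumbers.UNASSIGNED_SEQ_NO (-2).

    // Minimal sketch: pick the newest commit that is either safe or NO_OPS_PERFORMED.
    final class KeptCommitSketch {
        static final long NO_OPS_PERFORMED = -1;  // SequenceNumbers.NO_OPS_PERFORMED
        static final long UNASSIGNED_SEQ_NO = -2; // SequenceNumbers.UNASSIGNED_SEQ_NO

        // maxSeqNos holds the max_seqno of each commit, oldest first.
        static int indexOfKeptCommits(long[] maxSeqNos, long globalCheckpoint) {
            for (int i = maxSeqNos.length - 1; i >= 0; i--) {
                if (maxSeqNos[i] == NO_OPS_PERFORMED) {
                    return i; // keep at most one NO_OPS_PERFORMED commit: the newest
                }
                if (maxSeqNos[i] <= globalCheckpoint) {
                    return i; // a safe commit: max_seqno at most the global checkpoint
                }
            }
            return 0; // no safe commit found; keep every commit
        }

        public static void main(String[] args) {
            // Two empty commits and an unassigned global checkpoint: index 1 is kept,
            // so the older NO_OPS_PERFORMED commit (index 0) becomes deletable.
            System.out.println(indexOfKeptCommits(
                new long[] {NO_OPS_PERFORMED, NO_OPS_PERFORMED}, UNASSIGNED_SEQ_NO));
        }
    }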
+ if (maxSeqNoFromCommit == SequenceNumbers.NO_OPS_PERFORMED) { + return i; + } if (maxSeqNoFromCommit <= globalCheckpoint) { return i; } diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 43e3f8f64ea44..9a08f1d030f2f 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -42,6 +42,7 @@ import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -149,6 +150,54 @@ public void testLegacyIndex() throws Exception { assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(safeTranslogGen)); } + public void testKeepSingleNoOpsCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(randomLong()); + final UUID translogUUID = UUID.randomUUID(); + TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + + final List commitList = new ArrayList<>(); + final int numOfNoOpsCommits = between(1, 10); + long lastNoopTranslogGen = 0; + for (int i = 0; i < numOfNoOpsCommits; i++) { + lastNoopTranslogGen += between(1, 20); + commitList.add(mockIndexCommit(SequenceNumbers.NO_OPS_PERFORMED, translogUUID, lastNoopTranslogGen)); + } + // Keep only one no_ops commit. + indexPolicy.onCommit(commitList); + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastNoopTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastNoopTranslogGen)); + for (int i = 0; i < numOfNoOpsCommits - 1; i++) { + verify(commitList.get(i), times(1)).delete(); + } + verify(commitList.get(commitList.size() - 1), never()).delete(); + // Add a some good commits. + final int numOfGoodCommits = between(1, 5); + long maxSeqNo = 0; + long lastTranslogGen = lastNoopTranslogGen; + for (int i = 0; i < numOfGoodCommits; i++) { + maxSeqNo += between(1, 1000); + lastTranslogGen += between(1, 20); + commitList.add(mockIndexCommit(maxSeqNo, translogUUID, lastTranslogGen)); + } + // If the global checkpoint is still unassigned, we should still keep one NO_OPS_PERFORMED commit. + globalCheckpoint.set(SequenceNumbers.UNASSIGNED_SEQ_NO); + indexPolicy.onCommit(commitList); + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastNoopTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + for (int i = 0; i < numOfNoOpsCommits - 1; i++) { + verify(commitList.get(i), times(2)).delete(); + } + verify(commitList.get(numOfNoOpsCommits - 1), never()).delete(); + // Delete no-ops commit if global checkpoint advanced enough. 
+ final long lower = Long.parseLong(commitList.get(numOfNoOpsCommits).getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + globalCheckpoint.set(randomLongBetween(lower, Long.MAX_VALUE)); + indexPolicy.onCommit(commitList); + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), greaterThan(lastNoopTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + verify(commitList.get(numOfNoOpsCommits - 1), times(1)).delete(); + } + public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); From cdc6085467e871bf017d7a6c553cac2fd807f6bf Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 11 Jan 2018 10:39:12 -0500 Subject: [PATCH 02/31] Primary send safe commit in file-based recovery (#28038) Today a primary shard transfers the most recent commit point to a replica shard in a file-based recovery. However, the most recent commit may not be a "safe" commit; this causes a replica shard not having a safe commit point until it can retain a safe commit by itself. This commits collapses the snapshot deletion policy into the combined deletion policy and modifies the peer recovery source to send a safe commit. Relates #10708 --- .../index/engine/CombinedDeletionPolicy.java | 109 ++++++++++++++++-- .../elasticsearch/index/engine/Engine.java | 11 +- .../index/engine/InternalEngine.java | 19 ++- .../elasticsearch/index/shard/IndexShard.java | 7 +- .../index/shard/LocalShardSnapshot.java | 2 +- .../org/elasticsearch/index/store/Store.java | 4 +- .../recovery/RecoverySourceHandler.java | 2 +- .../snapshots/SnapshotShardsService.java | 2 +- .../engine/CombinedDeletionPolicyTests.java | 86 ++++++++++---- .../index/engine/InternalEngineTests.java | 37 +++++- .../index/shard/IndexShardTests.java | 35 ------ .../recovery/RecoverySourceHandlerTests.java | 2 +- .../indices/recovery/RecoveryTests.java | 30 +++++ .../index/shard/IndexShardTestCase.java | 2 +- 14 files changed, 256 insertions(+), 92 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 018e299a6888b..03fbdfa5ae8aa 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -19,14 +19,17 @@ package org.elasticsearch.index.engine; +import com.carrotsearch.hppc.ObjectIntHashMap; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexDeletionPolicy; +import org.apache.lucene.store.Directory; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogDeletionPolicy; import java.io.IOException; import java.nio.file.Path; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.function.LongSupplier; @@ -42,12 +45,16 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final TranslogDeletionPolicy translogDeletionPolicy; private final EngineConfig.OpenMode openMode; private final LongSupplier globalCheckpointSupplier; + private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. 
+ private IndexCommit safeCommit; // the most recent safe commit point - its max_seqno is at most the persisted global checkpoint. + private IndexCommit lastCommit; // the most recent commit point CombinedDeletionPolicy(EngineConfig.OpenMode openMode, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) { this.openMode = openMode; this.translogDeletionPolicy = translogDeletionPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.snapshottedCommits = new ObjectIntHashMap<>(); } @Override @@ -70,18 +77,22 @@ public void onInit(List commits) throws IOException { } @Override - public void onCommit(List commits) throws IOException { + public synchronized void onCommit(List commits) throws IOException { final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong()); + lastCommit = commits.get(commits.size() - 1); + safeCommit = commits.get(keptPosition); for (int i = 0; i < keptPosition; i++) { - commits.get(i).delete(); + if (snapshottedCommits.containsKey(commits.get(i)) == false) { + commits.get(i).delete(); + } } - updateTranslogDeletionPolicy(commits.get(keptPosition), commits.get(commits.size() - 1)); + updateTranslogDeletionPolicy(); } - private void updateTranslogDeletionPolicy(final IndexCommit minRequiredCommit, final IndexCommit lastCommit) throws IOException { - assert minRequiredCommit.isDeleted() == false : "The minimum required commit must not be deleted"; - final long minRequiredGen = Long.parseLong(minRequiredCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); - + private void updateTranslogDeletionPolicy() throws IOException { + assert Thread.holdsLock(this); + assert safeCommit.isDeleted() == false : "The safe commit must not be deleted"; + final long minRequiredGen = Long.parseLong(safeCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); assert lastCommit.isDeleted() == false : "The last commit must not be deleted"; final long lastGen = Long.parseLong(lastCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); @@ -90,6 +101,34 @@ private void updateTranslogDeletionPolicy(final IndexCommit minRequiredCommit, f translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredGen); } + /** + * Captures the most recent commit point {@link #lastCommit} or the most recent safe commit point {@link #safeCommit}. + * Index files of the captured commit point won't be released until the commit reference is closed. + * + * @param acquiringSafeCommit captures the most recent safe commit point if true; otherwise captures the most recent commit point. + */ + synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) { + assert safeCommit != null : "Safe commit is not initialized yet"; + assert lastCommit != null : "Last commit is not initialized yet"; + final IndexCommit snapshotting = acquiringSafeCommit ? safeCommit : lastCommit; + snapshottedCommits.addTo(snapshotting, 1); // increase refCount + return new SnapshotIndexCommit(snapshotting); + } + + /** + * Releases an index commit that was acquired by {@link #acquireIndexCommit(boolean)}.
+ */ + synchronized void releaseCommit(final IndexCommit snapshotCommit) { + final IndexCommit releasingCommit = ((SnapshotIndexCommit) snapshotCommit).delegate; + assert snapshottedCommits.containsKey(releasingCommit) : "Release non-snapshotted commit;" + + "snapshotted commits [" + snapshottedCommits + "], releasing commit [" + releasingCommit + "]"; + final int refCount = snapshottedCommits.addTo(releasingCommit, -1); // release refCount + assert refCount >= 0 : "Number of snapshots can not be negative [" + refCount + "]"; + if (refCount == 0) { + snapshottedCommits.remove(releasingCommit); + } + } + /** * Find a safe commit point from a list of existing commits based on the supplied global checkpoint. * The max sequence number of a safe commit point should be at most the global checkpoint. @@ -149,4 +188,60 @@ private static int indexOfKeptCommits(List commits, long */ return 0; } + + /** + * A wrapper of an index commit that prevents it from being deleted. + */ + private static class SnapshotIndexCommit extends IndexCommit { + private final IndexCommit delegate; + + SnapshotIndexCommit(IndexCommit delegate) { + this.delegate = delegate; + } + + @Override + public String getSegmentsFileName() { + return delegate.getSegmentsFileName(); + } + + @Override + public Collection getFileNames() throws IOException { + return delegate.getFileNames(); + } + + @Override + public Directory getDirectory() { + return delegate.getDirectory(); + } + + @Override + public void delete() { + throw new UnsupportedOperationException("A snapshot commit does not support deletion"); + } + + @Override + public boolean isDeleted() { + return delegate.isDeleted(); + } + + @Override + public int getSegmentCount() { + return delegate.getSegmentCount(); + } + + @Override + public long getGeneration() { + return delegate.getGeneration(); + } + + @Override + public Map getUserData() throws IOException { + return delegate.getUserData(); + } + + @Override + public String toString() { + return "SnapshotIndexCommit{" + delegate + "}"; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 473dc2ba88ed1..998a8e7f17eaf 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -32,7 +32,6 @@ import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ReferenceManager; @@ -92,7 +91,6 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; -import java.util.stream.Collectors; public abstract class Engine implements Closeable { @@ -880,9 +878,10 @@ public void forceMerge(boolean flush) throws IOException { * Snapshots the index and returns a handle to it. If needed will try and "commit" the * lucene index to make sure we have a "fresh" copy of the files to snapshot. * + * @param safeCommit indicates whether the engine should acquire the most recent safe commit, or the most recent commit. 
* @param flushFirst indicates whether the engine should flush before returning the snapshot */ - public abstract IndexCommitRef acquireIndexCommit(boolean flushFirst) throws EngineException; + public abstract IndexCommitRef acquireIndexCommit(boolean safeCommit, boolean flushFirst) throws EngineException; /** * fail engine due to some error. the engine will also be closed. @@ -1458,9 +1457,9 @@ public static class IndexCommitRef implements Closeable { private final CheckedRunnable onClose; private final IndexCommit indexCommit; - IndexCommitRef(SnapshotDeletionPolicy deletionPolicy) throws IOException { - indexCommit = deletionPolicy.snapshot(); - onClose = () -> deletionPolicy.release(indexCommit); + IndexCommitRef(IndexCommit indexCommit, CheckedRunnable onClose) { + this.indexCommit = indexCommit; + this.onClose = onClose; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 91b7a5fd8dc09..6c39eb626da8d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -126,7 +126,7 @@ public class InternalEngine extends Engine { private final String uidField; - private final SnapshotDeletionPolicy snapshotDeletionPolicy; + private final CombinedDeletionPolicy combinedDeletionPolicy; // How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges // are falling behind and when writing indexing buffer to disk is too slow. When this is 0, there is no throttling, else we throttling @@ -185,9 +185,8 @@ public InternalEngine(EngineConfig engineConfig) { translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier(), startingCommit); assert translog.getGeneration() != null; this.translog = translog; - this.snapshotDeletionPolicy = new SnapshotDeletionPolicy( - new CombinedDeletionPolicy(openMode, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint) - ); + this.combinedDeletionPolicy = new CombinedDeletionPolicy(openMode, translogDeletionPolicy, + translog::getLastSyncedGlobalCheckpoint); writer = createWriter(openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, startingCommit); updateMaxUnsafeAutoIdTimestampFromWriter(writer); assert engineConfig.getForceNewHistoryUUID() == false @@ -1699,7 +1698,7 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu } @Override - public IndexCommitRef acquireIndexCommit(final boolean flushFirst) throws EngineException { + public IndexCommitRef acquireIndexCommit(final boolean safeCommit, final boolean flushFirst) throws EngineException { // we have to flush outside of the readlock otherwise we might have a problem upgrading // the to a write lock when we fail the engine in this operation if (flushFirst) { @@ -1707,12 +1706,8 @@ public IndexCommitRef acquireIndexCommit(final boolean flushFirst) throws Engine flush(false, true); logger.trace("finish flush for snapshot"); } - try (ReleasableLock lock = readLock.acquire()) { - logger.trace("pulling snapshot"); - return new IndexCommitRef(snapshotDeletionPolicy); - } catch (IOException e) { - throw new SnapshotFailedEngineException(shardId, e); - } + final IndexCommit snapshotCommit = combinedDeletionPolicy.acquireIndexCommit(safeCommit); + return new Engine.IndexCommitRef(snapshotCommit, () -> 
combinedDeletionPolicy.releaseCommit(snapshotCommit)); } private boolean failOnTragicEvent(AlreadyClosedException ex) { @@ -1883,7 +1878,7 @@ private IndexWriterConfig getIndexWriterConfig(boolean create, IndexCommit start iwc.setCommitOnClose(false); // we by default don't commit on close iwc.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND); iwc.setIndexCommit(startingCommit); - iwc.setIndexDeletionPolicy(snapshotDeletionPolicy); + iwc.setIndexDeletionPolicy(combinedDeletionPolicy); // with tests.verbose, lucene sets this up: plumb to align with filesystem stream boolean verbose = false; try { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index eb4ccb2fe9ada..8a5b255f43c74 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1101,13 +1101,14 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { * Creates a new {@link IndexCommit} snapshot form the currently running engine. All resources referenced by this * commit won't be freed until the commit / snapshot is closed. * + * @param safeCommit true capture the most recent safe commit point; otherwise the most recent commit point. * @param flushFirst true if the index should first be flushed to disk / a low level lucene commit should be executed */ - public Engine.IndexCommitRef acquireIndexCommit(boolean flushFirst) throws EngineException { + public Engine.IndexCommitRef acquireIndexCommit(boolean safeCommit, boolean flushFirst) throws EngineException { IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { - return getEngine().acquireIndexCommit(flushFirst); + return getEngine().acquireIndexCommit(safeCommit, flushFirst); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); } @@ -1141,7 +1142,7 @@ public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException { return store.getMetadata(null, true); } } - indexCommit = engine.acquireIndexCommit(false); + indexCommit = engine.acquireIndexCommit(false, false); return store.getMetadata(indexCommit.getIndexCommit()); } finally { store.decRef(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java index e156e988c8700..f8f92fbb5fa8b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java @@ -48,7 +48,7 @@ final class LocalShardSnapshot implements Closeable { store.incRef(); boolean success = false; try { - indexCommit = shard.acquireIndexCommit(true); + indexCommit = shard.acquireIndexCommit(false, true); success = true; } finally { if (success == false) { diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 41878c46011a3..dab39c26a3c5b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -246,7 +246,7 @@ final void ensureOpen() { * * {@link 
#readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard - * {@link IndexShard#acquireIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed + * {@link IndexShard#acquireIndexCommit(boolean, boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the * directory * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an @@ -270,7 +270,7 @@ public MetadataSnapshot getMetadata(IndexCommit commit) throws IOException { * * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard - * {@link IndexShard#acquireIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed + * {@link IndexShard#acquireIndexCommit(boolean, boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed * * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the * directory diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 4ebce1c0b4bee..7afe6c977da21 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -159,7 +159,7 @@ public RecoveryResponse recoverToTarget() throws IOException { } else { final Engine.IndexCommitRef phase1Snapshot; try { - phase1Snapshot = shard.acquireIndexCommit(false); + phase1Snapshot = shard.acquireIndexCommit(true, false); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index bffd346796198..13b1b8ff1c6f9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -415,7 +415,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina final Repository repository = snapshotsService.getRepositoriesService().repository(snapshot.getRepository()); try { // we flush first to make sure we get the latest writes snapshotted - try (Engine.IndexCommitRef snapshotRef = indexShard.acquireIndexCommit(true)) { + try (Engine.IndexCommitRef snapshotRef = indexShard.acquireIndexCommit(false, true)) { repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { StringBuilder details = new StringBuilder(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 9a08f1d030f2f..f286856f32423 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -21,7 +21,7 @@ import com.carrotsearch.hppc.LongArrayList; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.SnapshotDeletionPolicy; +import org.apache.lucene.store.Directory; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogDeletionPolicy; @@ -34,14 +34,15 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.singletonList; import static org.elasticsearch.index.engine.EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG; import static org.elasticsearch.index.engine.EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.doAnswer; import static org.hamcrest.Matchers.greaterThan; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -89,29 +90,64 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); } - public void testIgnoreSnapshottingCommits() throws Exception { + public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); - - long firstMaxSeqNo = randomLongBetween(0, Long.MAX_VALUE - 1); - long secondMaxSeqNo = randomLongBetween(firstMaxSeqNo + 1, Long.MAX_VALUE); - - long lastTranslogGen = randomNonNegativeLong(); - final IndexCommit firstCommit = mockIndexCommit(firstMaxSeqNo, translogUUID, randomLongBetween(0, lastTranslogGen)); - final IndexCommit secondCommit = mockIndexCommit(secondMaxSeqNo, translogUUID, lastTranslogGen); - SnapshotDeletionPolicy snapshotDeletionPolicy = new SnapshotDeletionPolicy(indexPolicy); - - snapshotDeletionPolicy.onInit(Arrays.asList(firstCommit)); - snapshotDeletionPolicy.snapshot(); - assertThat(snapshotDeletionPolicy.getSnapshots(), contains(firstCommit)); - - // SnapshotPolicy prevents the first commit from deleting, but CombinedPolicy does not retain its translog. 
- globalCheckpoint.set(randomLongBetween(secondMaxSeqNo, Long.MAX_VALUE)); - snapshotDeletionPolicy.onCommit(Arrays.asList(firstCommit, secondCommit)); - verify(firstCommit, never()).delete(); - verify(secondCommit, never()).delete(); + long lastMaxSeqNo = between(1, 1000); + long lastTranslogGen = between(1, 20); + int safeIndex = 0; + List commitList = new ArrayList<>(); + List snapshottingCommits = new ArrayList<>(); + final int iters = between(10, 100); + for (int i = 0; i < iters; i++) { + int newCommits = between(1, 10); + for (int n = 0; n < newCommits; n++) { + lastMaxSeqNo += between(1, 1000); + lastTranslogGen += between(1, 20); + commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + } + // Advance the global checkpoint to between [safeIndex, safeIndex + 1) + safeIndex = randomIntBetween(safeIndex, commitList.size() - 1); + long lower = Math.max(globalCheckpoint.get(), + Long.parseLong(commitList.get(safeIndex).getUserData().get(SequenceNumbers.MAX_SEQ_NO))); + long upper = safeIndex == commitList.size() - 1 ? lastMaxSeqNo : + Long.parseLong(commitList.get(safeIndex + 1).getUserData().get(SequenceNumbers.MAX_SEQ_NO)) - 1; + globalCheckpoint.set(randomLongBetween(lower, upper)); + indexPolicy.onCommit(commitList); + // Captures and releases some commits + int captures = between(0, 5); + for (int n = 0; n < captures; n++) { + boolean safe = randomBoolean(); + final IndexCommit snapshot = indexPolicy.acquireIndexCommit(safe); + expectThrows(UnsupportedOperationException.class, snapshot::delete); + snapshottingCommits.add(snapshot); + if (safe) { + assertThat(snapshot.getUserData(), equalTo(commitList.get(safeIndex).getUserData())); + } else { + assertThat(snapshot.getUserData(), equalTo(commitList.get(commitList.size() - 1).getUserData())); + } + } + randomSubsetOf(snapshottingCommits).forEach(snapshot -> { + snapshottingCommits.remove(snapshot); + indexPolicy.releaseCommit(snapshot); + }); + // Snapshotting commits must not be deleted. + snapshottingCommits.forEach(snapshot -> assertThat(snapshot.isDeleted(), equalTo(false))); + // We don't need to retain translog for snapshotting commits. 
+ assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), + equalTo(Long.parseLong(commitList.get(safeIndex).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), + equalTo(Long.parseLong(commitList.get(commitList.size() - 1).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + } + snapshottingCommits.forEach(indexPolicy::releaseCommit); + globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); + indexPolicy.onCommit(commitList); + for (int i = 0; i < commitList.size() - 1; i++) { + assertThat(commitList.get(i).isDeleted(), equalTo(true)); + } + assertThat(commitList.get(commitList.size() - 1).isDeleted(), equalTo(false)); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); } @@ -229,8 +265,16 @@ IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); userData.put(Translog.TRANSLOG_UUID_KEY, translogUUID.toString()); userData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGen)); + final AtomicBoolean deleted = new AtomicBoolean(); final IndexCommit commit = mock(IndexCommit.class); + final Directory directory = mock(Directory.class); when(commit.getUserData()).thenReturn(userData); + when(commit.getDirectory()).thenReturn(directory); + when(commit.isDeleted()).thenAnswer(args -> deleted.get()); + doAnswer(arg -> { + deleted.set(true); + return null; + }).when(commit).delete(); return commit; } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 9a482eed1249b..dd51c7aa8d898 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -167,6 +167,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -2133,7 +2134,7 @@ public void testConcurrentWritesAndCommits() throws Exception { boolean doneIndexing; do { doneIndexing = doneLatch.await(sleepTime, TimeUnit.MILLISECONDS); - commits.add(engine.acquireIndexCommit(true)); + commits.add(engine.acquireIndexCommit(false, true)); if (commits.size() > commitLimit) { // don't keep on piling up too many commits IOUtils.close(commits.remove(randomIntBetween(0, commits.size()-1))); // we increase the wait time to make sure we eventually if things are slow wait for threads to finish. 
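As an aside on the bookkeeping these tests exercise: open commit references pin their commits against deletion through a per-commit counter. A reduced model of that counting, assuming a plain HashMap where the real policy uses hppc's ObjectIntHashMap (the class and method names here are invented for illustration):

    import java.util.HashMap;
    import java.util.Map;

    // Reduced model of the snapshot ref-counting in CombinedDeletionPolicy.
    final class CommitRefCounts<C> {
        private final Map<C, Integer> refs = new HashMap<>();

        synchronized C acquire(C commit) {
            refs.merge(commit, 1, Integer::sum); // pin: onCommit() must not delete it
            return commit;
        }

        synchronized void release(C commit) {
            int refCount = refs.merge(commit, -1, Integer::sum);
            assert refCount >= 0 : "commit released more often than acquired";
            if (refCount == 0) {
                refs.remove(commit); // unpinned: deletable on the next onCommit()
            }
        }

        synchronized boolean isPinned(C commit) {
            return refs.containsKey(commit);
        }
    }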
@@ -4337,4 +4338,38 @@ public void testConcurrentAppendUpdateAndRefresh() throws InterruptedException, assertEquals(totalNumDocs, searcher.reader().numDocs()); } } + + public void testAcquireIndexCommit() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final AtomicLong globalCheckpoint = new AtomicLong(); + try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + int numDocs = between(1, 20); + for (int i = 0; i < numDocs; i++) { + index(engine, i); + } + final boolean inSync = randomBoolean(); + if (inSync) { + globalCheckpoint.set(numDocs - 1); + } + final boolean flushFirst = randomBoolean(); + final boolean safeCommit = randomBoolean(); + Engine.IndexCommitRef commit = engine.acquireIndexCommit(safeCommit, flushFirst); + int moreDocs = between(1, 20); + for (int i = 0; i < moreDocs; i++) { + index(engine, numDocs + i); + } + globalCheckpoint.set(numDocs + moreDocs - 1); + engine.flush(); + // check that we can still read the commit that we captured + try (IndexReader reader = DirectoryReader.open(commit.getIndexCommit())) { + assertThat(reader.numDocs(), equalTo(flushFirst && (safeCommit == false || inSync) ? numDocs : 0)); + } + assertThat(DirectoryReader.listCommits(engine.store.directory()), hasSize(2)); + commit.close(); + // check it's clean up + engine.flush(true, true); + assertThat(DirectoryReader.listCommits(engine.store.directory()), hasSize(1)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 7b23523eeb595..0e5fa77940178 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1043,41 +1043,6 @@ public void onFailure(Exception e) { closeShards(indexShard); } - public void testAcquireIndexCommit() throws Exception { - boolean isPrimary = randomBoolean(); - final IndexShard shard = newStartedShard(isPrimary); - int numDocs = randomInt(20); - for (int i = 0; i < numDocs; i++) { - indexDoc(shard, "type", "id_" + i); - } - final boolean flushFirst = randomBoolean(); - Engine.IndexCommitRef commit = shard.acquireIndexCommit(flushFirst); - int moreDocs = randomInt(20); - for (int i = 0; i < moreDocs; i++) { - indexDoc(shard, "type", "id_" + numDocs + i); - } - flushShard(shard); - // check that we can still read the commit that we captured - try (IndexReader reader = DirectoryReader.open(commit.getIndexCommit())) { - assertThat(reader.numDocs(), equalTo(flushFirst ? numDocs : 0)); - } - commit.close(); - // Make the global checkpoint in sync with the local checkpoint. 
- if (isPrimary) { - final String allocationId = shard.shardRouting.allocationId().getId(); - shard.updateLocalCheckpointForShard(allocationId, numDocs + moreDocs - 1); - shard.updateGlobalCheckpointForShard(allocationId, shard.getLocalCheckpoint()); - } else { - shard.updateGlobalCheckpointOnReplica(numDocs + moreDocs - 1, "test"); - } - flushShard(shard, true); - - // check it's clean up - assertThat(DirectoryReader.listCommits(shard.store().directory()), hasSize(1)); - - closeShards(shard); - } - /*** * test one can snapshot the store at various lifecycle stages */ diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index cf5f24d2a6e18..4963c1b74a53f 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -396,7 +396,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class)); when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class)); when(shard.state()).thenReturn(IndexShardState.RELOCATED); - when(shard.acquireIndexCommit(anyBoolean())).thenReturn(mock(Engine.IndexCommitRef.class)); + when(shard.acquireIndexCommit(anyBoolean(), anyBoolean())).thenReturn(mock(Engine.IndexCommitRef.class)); doAnswer(invocation -> { ((ActionListener)invocation.getArguments()[0]).onResponse(() -> {}); return null; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 4a449463b5e8c..85dc3a5fc3906 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.recovery; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; @@ -27,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentType; @@ -36,11 +39,13 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase; import org.elasticsearch.index.replication.RecoveryDuringReplicationTests; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; @@ -48,6 +53,7 @@ import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; public class 
RecoveryTests extends ESIndexLevelReplicationTestCase { @@ -241,4 +247,28 @@ public void testPeerRecoveryPersistGlobalCheckpoint() throws Exception { assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1)); } } + + public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { + IndexShard primaryShard = newStartedShard(true); + int numDocs = between(1, 100); + long globalCheckpoint = 0; + for (int i = 0; i < numDocs; i++) { + primaryShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + SourceToParse.source(primaryShard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"), + XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(primaryShard, "test")); + if (randomBoolean()) { + globalCheckpoint = randomLongBetween(globalCheckpoint, i); + primaryShard.updateLocalCheckpointForShard(primaryShard.routingEntry().allocationId().getId(), globalCheckpoint); + primaryShard.updateGlobalCheckpointForShard(primaryShard.routingEntry().allocationId().getId(), globalCheckpoint); + primaryShard.flush(new FlushRequest()); + } + } + IndexShard replicaShard = newShard(primaryShard.shardId(), false); + updateMappings(replicaShard, primaryShard.indexSettings().getIndexMetaData()); + recoverReplica(replicaShard, primaryShard); + List commits = DirectoryReader.listCommits(replicaShard.store().directory()); + long maxSeqNo = Long.parseLong(commits.get(0).getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + assertThat(maxSeqNo, lessThanOrEqualTo(globalCheckpoint)); + closeShards(primaryShard, replicaShard); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index fd97660051b32..e7cb62b421282 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -621,7 +621,7 @@ protected void snapshotShard(final IndexShard shard, final Snapshot snapshot, final Repository repository) throws IOException { final IndexShardSnapshotStatus snapshotStatus = new IndexShardSnapshotStatus(); - try (Engine.IndexCommitRef indexCommitRef = shard.acquireIndexCommit(true)) { + try (Engine.IndexCommitRef indexCommitRef = shard.acquireIndexCommit(false, true)) { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); From 95cdf1254ea1bc89b5946835586717e388f61001 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 12 Jan 2018 19:06:04 -0500 Subject: [PATCH 03/31] Truncate tlog cli should assign global checkpoint (#28192) We are targeting to always have a safe index once the recovery is done. This invariant does not hold if the translog is manually truncated by users because the truncate translog cli resets the global checkpoint to unassigned. This commit assigns the global checkpoint to the max_seqno of the last commit when truncating translog. We can only safely do it because the truncate translog command will generate a new history uuid for that shard. With a new history UUID, sequence-based recovery between that shard and other old shards will be disabled. 
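Concretely, the invariant being restored is the safe-commit predicate the deletion policy applies; the sketch below (a hypothetical class, assuming the max_seq_no commit user-data key used throughout this series) shows why assigning the global checkpoint the last commit's max_seqno makes that commit trivially safe:

    import java.util.Map;

    // A commit is "safe" when its max_seqno is at most the persisted global
    // checkpoint, so globalCheckpoint = maxSeqNo(lastCommit) always yields a
    // safe last commit.
    final class SafeCommitSketch {
        static boolean isSafe(Map<String, String> commitUserData, long globalCheckpoint) {
            final long maxSeqNo = Long.parseLong(commitUserData.get("max_seq_no"));
            return maxSeqNo <= globalCheckpoint;
        }
    }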
Relates #28181 --- .../translog/TruncateTranslogCommand.java | 18 ++++++++++++++---- .../index/translog/TruncateTranslogIT.java | 17 +++++++++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index d9b77f841ed09..222e3e13d65e1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -132,9 +132,19 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } // Retrieve the generation and UUID from the existing data - commitData = commits.get(commits.size() - 1).getUserData(); + commitData = new HashMap<>(commits.get(commits.size() - 1).getUserData()); String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY); String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint; + // In order to have a safe commit invariant, we have to assign the global checkpoint to the max_seqno of the last commit. + // We can only safely do it because we will generate a new history uuid this shard. + if (commitData.containsKey(SequenceNumbers.MAX_SEQ_NO)) { + globalCheckpoint = Long.parseLong(commitData.get(SequenceNumbers.MAX_SEQ_NO)); + // Also advances the local checkpoint of the last commit to its max_seqno. + commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(globalCheckpoint)); + } else { + globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + } if (translogGeneration == null || translogUUID == null) { throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]", translogGeneration, translogUUID); @@ -153,7 +163,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th // Write empty checkpoint and translog to empty files long gen = Long.parseLong(translogGeneration); int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID); - writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen); + writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen, globalCheckpoint); terminal.println("Removing existing translog files"); IOUtils.rm(translogFiles.toArray(new Path[]{})); @@ -190,9 +200,9 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } /** Write a checkpoint file to the given location with the given generation */ - public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException { + static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration, long globalCheckpoint) throws IOException { Checkpoint emptyCheckpoint = Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration, - SequenceNumbers.UNASSIGNED_SEQ_NO, translogGeneration); + globalCheckpoint, translogGeneration); Checkpoint.write(FileChannel::open, filename, emptyCheckpoint, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); // fsync with metadata here to make sure. 
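For orientation, a simplified view of the record the command now writes; this is an invented sketch of the fields involved, not the real Checkpoint class or its on-disk encoding:

    // Essential fields persisted for the truncated translog; the real Checkpoint
    // also tracks offsets for syncing and is serialized with checksums.
    final class EmptyCheckpointSketch {
        final long offset;                // size in bytes of the empty translog file
        final int numOps = 0;             // no operations survive the truncation
        final long generation;            // kept from the old translog
        final long globalCheckpoint;      // now the last commit's max_seqno, not UNASSIGNED
        final long minTranslogGeneration; // equals generation for an empty translog

        EmptyCheckpointSketch(long offset, long generation, long globalCheckpoint) {
            this.offset = offset;
            this.generation = generation;
            this.globalCheckpoint = globalCheckpoint;
            this.minTranslogGeneration = generation;
        }
    }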
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java index e35c0676aac4a..297314fc2dedf 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.cli.MockTerminal; @@ -47,6 +48,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MockEngineFactoryPlugin; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; @@ -74,6 +76,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0) @@ -212,6 +215,10 @@ public void testCorruptTranslogTruncation() throws Exception { final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get("test").stream() .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get(); assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0)); + // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit. + final SeqNoStats seqNoStats = getSeqNoStats("test", 0); + assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); } public void testCorruptTranslogTruncationOfReplica() throws Exception { @@ -312,6 +319,10 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get(); // the replica translog was disabled so it doesn't know what hte global checkpoint is and thus can't do ops based recovery assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0)); + // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit. 
+ final SeqNoStats seqNoStats = getSeqNoStats("test", 0); + assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); } private Set getTranslogDirs(String indexName) throws IOException { @@ -356,4 +367,10 @@ private static void disableTranslogFlush(String index) { client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); } + private SeqNoStats getSeqNoStats(String index, int shardId) { + final ShardStats[] shardStats = client().admin().indices() + .prepareStats(index).get() + .getIndices().get(index).getShards(); + return shardStats[shardId].getSeqNoStats(); + } } From 326fa1c9a0fd91dc51e77a7316924a4b0e94f84d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 12 Jan 2018 20:09:34 -0500 Subject: [PATCH 04/31] TEST: init unassigned gcp in testAcquireIndexCommit The global checkpoint should be assigned to unassigned rather than 0. If a single document is indexed and the global checkpoint is initialized with 0, the first commit is safe which the test does not suppose. Relates #28038 --- .../org/elasticsearch/index/engine/InternalEngineTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index dd51c7aa8d898..e1aa09b9b6542 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4342,7 +4342,7 @@ public void testConcurrentAppendUpdateAndRefresh() throws InterruptedException, public void testAcquireIndexCommit() throws Exception { IOUtils.close(engine, store); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { int numDocs = between(1, 20); for (int i = 0; i < numDocs; i++) { From a48be3f1f82bbab6c9ef5c13e16706e27d0e4aed Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 13 Jan 2018 11:43:15 -0500 Subject: [PATCH 05/31] AwaitsFix #testRecoveryAfterPrimaryPromotion Relates #28209 --- .../index/replication/RecoveryDuringReplicationTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 2bf7de6b94a82..7b111cbbff2e1 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -213,6 +213,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { } @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28209") public void testRecoveryAfterPrimaryPromotion() throws Exception { try (ReplicationGroup shards = createGroup(2)) { shards.startAll(); From 8a4d27a9894d4245e9964e6a4718825359bb809d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 12 Jan 2018 19:09:31 -0500 Subject: [PATCH 06/31] Replica start peer recovery with safe commit (#28181) Today a replica starts a peer-recovery with the 
last commit. If the last commit is not a safe commit, a replica will immediately fall back to the file-based sync, which is more expensive than the sequence-based recovery. This commit modifies the peer-recovery on the replica to start with a safe commit. Moreover, we can keep the existing translog on the target if the recovery is sequence-based. Relates #10708 --- .../elasticsearch/index/engine/Engine.java | 5 + .../index/engine/InternalEngine.java | 9 ++ .../elasticsearch/index/shard/IndexShard.java | 21 ++- .../index/shard/StoreRecovery.java | 2 +- .../recovery/PeerRecoveryTargetService.java | 14 +- ...ryPrepareForTranslogOperationsRequest.java | 19 ++- .../recovery/RecoverySourceHandler.java | 11 +- .../indices/recovery/RecoveryTarget.java | 10 +- .../recovery/RecoveryTargetHandler.java | 6 +- .../recovery/RemoteRecoveryTargetHandler.java | 4 +- .../RecoveryDuringReplicationTests.java | 29 +++- .../index/shard/IndexShardTests.java | 11 +- .../PeerRecoveryTargetServiceTests.java | 128 ++++++------------ .../recovery/RecoverySourceHandlerTests.java | 2 +- .../indices/recovery/RecoveryTests.java | 35 +++++ 15 files changed, 188 insertions(+), 118 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 998a8e7f17eaf..1fbe17b752e2c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1533,6 +1533,11 @@ public interface Warmer { */ public abstract Engine recoverFromTranslog() throws IOException; + /** + * Do not replay translog operations, but make the engine ready. + */ + public abstract void skipTranslogRecovery(); + /** * Returns true iff this engine is currently recovering from translog.
*/ diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 6c39eb626da8d..db25a784a6a02 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -406,6 +406,15 @@ public InternalEngine recoverFromTranslog() throws IOException { return this; } + @Override + public void skipTranslogRecovery() { + if (openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { + throw new IllegalStateException("Can't skip translog recovery with open mode: " + openMode); + } + assert pendingTranslogRecovery.get() : "translogRecovery is not pending but should be"; + pendingTranslogRecovery.set(false); // we are good - now we can commit + } + private IndexCommit getStartingCommitPoint() throws IOException { if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { final Path translogPath = engineConfig.getTranslogConfig().getTranslogPath(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 8a5b255f43c74..cf3037c371ce0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1298,6 +1298,7 @@ public void createIndexAndTranslog() throws IOException { translogStats.totalOperationsOnStart(0); globalCheckpointTracker.updateGlobalCheckpointOnReplica(SequenceNumbers.NO_OPS_PERFORMED, "index created"); innerOpenEngineAndTranslog(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, false); + assertSequenceNumbersInCommit(); } /** opens the engine on top of the existing lucene engine but creates an empty translog **/ @@ -1310,15 +1311,29 @@ public void openIndexAndCreateTranslog(boolean forceNewHistoryUUID, long globalC + globalCheckpoint + "]"; globalCheckpointTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "opening index with a new translog"); innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, forceNewHistoryUUID); + assertSequenceNumbersInCommit(); } /** * opens the engine on top of the existing lucene engine and translog. * Operations from the translog will be replayed to bring lucene up to date. **/ - public void openIndexAndTranslog() throws IOException { + public void openIndexAndRecoveryFromTranslog() throws IOException { assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.EXISTING_STORE; innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, false); + getEngine().recoverFromTranslog(); + assertSequenceNumbersInCommit(); + } + + /** + * Opens the engine on top of the existing lucene engine and translog. + * The translog is kept but its operations won't be replayed. 
+ */ + public void openIndexAndSkipTranslogRecovery() throws IOException { + assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.PEER; + innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, false); + getEngine().skipTranslogRecovery(); + assertSequenceNumbersInCommit(); } private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, final boolean forceNewHistoryUUID) throws IOException { @@ -1350,15 +1365,13 @@ private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, fi globalCheckpointTracker.updateGlobalCheckpointOnReplica(Translog.readGlobalCheckpoint(translogConfig.getTranslogPath()), "read from translog checkpoint"); } - Engine newEngine = createNewEngine(config); + createNewEngine(config); verifyNotClosed(); if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { // We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive, // we still give sync'd flush a chance to run: active.set(true); - newEngine.recoverFromTranslog(); } - assertSequenceNumbersInCommit(); assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 6bc1ce2882c92..81ffbea642c58 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -401,7 +401,7 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe logger.debug("failed to list file details", e); } if (indexShouldExists) { - indexShard.openIndexAndTranslog(); + indexShard.openIndexAndRecoveryFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); } else { indexShard.createIndexAndTranslog(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index ba5dc5c60f29f..0bc225efbeb6c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -21,6 +21,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; @@ -39,6 +41,7 @@ import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.CombinedDeletionPolicy; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -60,6 +63,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.List; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -108,8 +112,8 @@ public PeerRecoveryTargetService(Settings settings, ThreadPool threadPool, Trans 
FileChunkTransportRequestHandler()); transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler()); - transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, ThreadPool - .Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); + transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, + ThreadPool.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new @@ -353,7 +357,9 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove public static long getStartingSeqNo(final RecoveryTarget recoveryTarget) { try { final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation()); - final SequenceNumbers.CommitInfo seqNoStats = recoveryTarget.store().loadSeqNoInfo(null); + final List existingCommits = DirectoryReader.listCommits(recoveryTarget.store().directory()); + final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, globalCheckpoint); + final SequenceNumbers.CommitInfo seqNoStats = recoveryTarget.store().loadSeqNoInfo(safeCommit); if (seqNoStats.maxSeqNo <= globalCheckpoint) { assert seqNoStats.localCheckpoint <= globalCheckpoint; /* @@ -387,7 +393,7 @@ class PrepareForTranslogOperationsRequestHandler implements TransportRequestHand public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.target().prepareForTranslogOperations(request.totalTranslogOps()); + recoveryRef.target().prepareForTranslogOperations(request.createNewTranslog(), request.totalTranslogOps()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index 61cd986a1aef4..a784954f459c8 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -33,14 +33,16 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques private long recoveryId; private ShardId shardId; private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; + private boolean createNewTranslog; public RecoveryPrepareForTranslogOperationsRequest() { } - RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) { + RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, boolean createNewTranslog) { this.recoveryId = recoveryId; this.shardId = shardId; this.totalTranslogOps = totalTranslogOps; + this.createNewTranslog = createNewTranslog; } public long recoveryId() { @@ -55,6 +57,13 @@ public int totalTranslogOps() { return totalTranslogOps; } + /** + * Whether or not 
the recovery target should create a new local translog + */ + boolean createNewTranslog() { + return createNewTranslog; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -64,6 +73,11 @@ public void readFrom(StreamInput in) throws IOException { if (in.getVersion().before(Version.V_6_0_0_alpha1)) { in.readLong(); // maxUnsafeAutoIdTimestamp } + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + createNewTranslog = in.readBoolean(); + } else { + createNewTranslog = true; + } } @Override @@ -75,5 +89,8 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_6_0_0_alpha1)) { out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp } + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeBoolean(createNewTranslog); + } } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 7afe6c977da21..3ee9b953757c3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -150,9 +150,9 @@ public RecoveryResponse recoverToTarget() throws IOException { final long startingSeqNo; final long requiredSeqNoRangeStart; - final boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && + final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery(); - if (isSequenceNumberBasedRecoveryPossible) { + if (isSequenceNumberBasedRecovery) { logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo()); startingSeqNo = request.startingSeqNo(); requiredSeqNoRangeStart = startingSeqNo; @@ -188,7 +188,8 @@ public RecoveryResponse recoverToTarget() throws IOException { runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId())); try { - prepareTargetForTranslog(translog.estimateTotalOperationsFromMinSeq(startingSeqNo)); + // For a sequence-based recovery, the target can keep its local translog + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, translog.estimateTotalOperationsFromMinSeq(startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); } @@ -421,13 +422,13 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO } } - void prepareTargetForTranslog(final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException { StopWatch stopWatch = new StopWatch().start(); logger.trace("recovery [phase1]: prepare remote engine for translog"); final long startEngineStart = stopWatch.totalTime().millis(); // Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) of tombstone deletes.
- cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps)); + cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(createNewTranslog, totalTranslogOps)); stopWatch.stop(); response.startTime = stopWatch.totalTime().millis() - startEngineStart; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index d383891345818..1bbcb9efa9644 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -362,10 +362,14 @@ private void ensureRefCount() { /*** Implementation of {@link RecoveryTargetHandler } */ @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { state().getTranslog().totalOperations(totalTranslogOps); - // TODO: take the local checkpoint from store as global checkpoint, once we know it's safe - indexShard().openIndexAndCreateTranslog(false, SequenceNumbers.UNASSIGNED_SEQ_NO); + if (createNewTranslog) { + // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 + indexShard().openIndexAndCreateTranslog(false, SequenceNumbers.UNASSIGNED_SEQ_NO); + } else { + indexShard().openIndexAndSkipTranslogRecovery(); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index e7403986dc233..736d602044656 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -32,10 +32,10 @@ public interface RecoveryTargetHandler { /** * Prepares the target to receive translog operations, after all files have been copied - * - * @param totalTranslogOps total translog operations expected to be sent + * @param createNewTranslog whether or not to delete the local translog on the target + * @param totalTranslogOps total translog operations expected to be sent */ - void prepareForTranslogOperations(int totalTranslogOps) throws IOException; + void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException; /** * The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 279bec186a433..4ea2be0e72659 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -76,9 +76,9 @@ public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportSe } @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId,
totalTranslogOps), + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, createNewTranslog), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 7b111cbbff2e1..77576426252d9 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -31,7 +31,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; @@ -227,7 +229,6 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { final IndexShard oldPrimary = shards.getPrimary(); final IndexShard newPrimary = shards.getReplicas().get(0); final IndexShard replica = shards.getReplicas().get(1); - boolean expectSeqNoRecovery = true; if (randomBoolean()) { // simulate docs that were inflight when primary failed, these will be rolled back final int rollbackDocs = randomIntBetween(1, 5); @@ -240,7 +241,6 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { } if (randomBoolean()) { oldPrimary.flush(new FlushRequest(index.getName())); - expectSeqNoRecovery = false; } } @@ -253,9 +253,30 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { equalTo(totalDocs - 1L)); // index some more - totalDocs += shards.indexDocs(randomIntBetween(0, 5)); + int moreDocs = shards.indexDocs(randomIntBetween(0, 5)); + totalDocs += moreDocs; + + // As a replica keeps a safe commit, file-based recovery only happens if the required translog + // for the sequence-based recovery is not fully retained and extra documents were added to the primary.
+ boolean expectSeqNoRecovery = (moreDocs == 0 || randomBoolean()); + int uncommittedOpsOnPrimary = 0; + if (expectSeqNoRecovery == false) { + IndexMetaData.Builder builder = IndexMetaData.builder(newPrimary.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder().put(newPrimary.indexSettings().getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + ); + newPrimary.indexSettings().updateIndexMetaData(builder.build()); + newPrimary.onSettingsChanged(); + shards.syncGlobalCheckpoint(); + newPrimary.flush(new FlushRequest()); + uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); + totalDocs += uncommittedOpsOnPrimary; + } if (randomBoolean()) { + uncommittedOpsOnPrimary = 0; + shards.syncGlobalCheckpoint(); newPrimary.flush(new FlushRequest()); } @@ -270,7 +291,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs)); } else { assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs)); + assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedOpsOnPrimary)); } // roll back the extra ops in the replica diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0e5fa77940178..a699ba7aee12f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -2107,7 +2106,7 @@ public void testShardActiveDuringInternalRecovery() throws IOException { shard.prepareForIndexRecovery(); // Shard is still inactive since we haven't started recovering yet assertFalse(shard.isActive()); - shard.openIndexAndTranslog(); + shard.openIndexAndRecoveryFromTranslog(); // Shard should now be active since we did recover: assertTrue(shard.isActive()); closeShards(shard); @@ -2135,8 +2134,8 @@ public void testShardActiveDuringPeerRecovery() throws IOException { new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> { }) { @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(totalTranslogOps); + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { + super.prepareForTranslogOperations(createNewTranslog, totalTranslogOps); // Shard is still inactive since we haven't started recovering yet assertFalse(replica.isActive()); @@ -2184,8 +2183,8 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { }) { // we're only checking that listeners are called when the engine is open, before there is no point @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(totalTranslogOps); + public void prepareForTranslogOperations(boolean 
createNewTranslog, int totalTranslogOps) throws IOException { + super.prepareForTranslogOperations(createNewTranslog, totalTranslogOps); assertListenerCalled.accept(replica); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index f691cfd0238d4..31521e33f21b6 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -19,103 +19,63 @@ package org.elasticsearch.indices.recovery; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.index.translog.TranslogWriter; - -import java.io.IOException; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { public void testGetStartingSeqNo() throws Exception { - IndexShard replica = newShard(false); - final AtomicReference translogLocation = new AtomicReference<>(); - RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null) { - @Override - Path translogLocation() { - return translogLocation.get(); - } - }; + final IndexShard replica = newShard(false); try { - recoveryEmptyReplica(replica); - int docs = randomIntBetween(1, 10); - final String index = replica.shardId().getIndexName(); - long seqNo = 0; - for (int i = 0; i < docs; i++) { - replica.applyIndexOperationOnReplica(seqNo++, 1, VersionType.EXTERNAL, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON), - update -> {}); - if (rarely()) { - // insert a gap - seqNo++; + // Empty store + { + recoveryEmptyReplica(replica); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); + recoveryTarget.decRef(); + } + // Last commit is good - use it. 
+ final long initDocs = scaledRandomIntBetween(1, 10); + { + for (int i = 0; i < initDocs; i++) { + indexDoc(replica, "doc", Integer.toString(i)); + if (randomBoolean()) { + flushShard(replica); + } } + flushShard(replica); + replica.updateGlobalCheckpointOnReplica(initDocs - 1, "test"); + replica.getTranslog().sync(); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(initDocs)); + recoveryTarget.decRef(); + } + // Global checkpoint does not advance, last commit is not good - use the previous commit + final int moreDocs = randomIntBetween(1, 10); + { + for (int i = 0; i < moreDocs; i++) { + indexDoc(replica, "doc", Long.toString(i)); + if (randomBoolean()) { + flushShard(replica); + } + } + flushShard(replica); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(initDocs)); + recoveryTarget.decRef(); + } + // Advances the global checkpoint, a safe commit also advances + { + replica.updateGlobalCheckpointOnReplica(initDocs + moreDocs - 1, "test"); + replica.getTranslog().sync(); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(initDocs + moreDocs)); + recoveryTarget.decRef(); } - - final long maxSeqNo = replica.seqNoStats().getMaxSeqNo(); - final long localCheckpoint = replica.getLocalCheckpoint(); - - translogLocation.set(replica.getTranslog().location()); - - final Translog translog = replica.getTranslog(); - final String translogUUID = translog.getTranslogUUID(); - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); - - translogLocation.set(writeTranslog(replica.shardId(), translogUUID, translog.currentFileGeneration(), maxSeqNo - 1)); - - // commit is good, global checkpoint is at least max *committed* which is NO_OPS_PERFORMED - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); - - replica.flush(new FlushRequest()); - - translogLocation.set(replica.getTranslog().location()); - - // commit is not good, global checkpoint is below max - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - - translogLocation.set(writeTranslog(replica.shardId(), translogUUID, translog.currentFileGeneration(), maxSeqNo)); - - // commit is good, global checkpoint is above max - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1)); } finally { closeShards(replica); - recoveryTarget.decRef(); } } - - private Path writeTranslog( - final ShardId shardId, - final String translogUUID, - final long generation, - final long globalCheckpoint - ) throws IOException { - final Path tempDir = createTempDir(); - final Path resolve = tempDir.resolve(Translog.getFilename(generation)); - Files.createFile(tempDir.resolve(Translog.CHECKPOINT_FILE_NAME)); - try (TranslogWriter ignored = TranslogWriter.create( - shardId, - translogUUID, - generation, - resolve, - FileChannel::open, - TranslogConfig.DEFAULT_BUFFER_SIZE, generation, globalCheckpoint, () -> globalCheckpoint, () -> generation)) {} - return tempDir; - } - } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java 
index 4963c1b74a53f..7ab6925ce57b9 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -423,7 +423,7 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO } @Override - void prepareTargetForTranslog(final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException { prepareTargetForTranslogCalled.set(true); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 85dc3a5fc3906..2089c36d06bc0 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.index.replication.RecoveryDuringReplicationTests; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; @@ -271,4 +272,38 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { assertThat(maxSeqNo, lessThanOrEqualTo(globalCheckpoint)); closeShards(primaryShard, replicaShard); } + + public void testSequenceBasedRecoveryKeepsTranslog() throws Exception { + try (ReplicationGroup shards = createGroup(1)) { + shards.startAll(); + final IndexShard replica = shards.getReplicas().get(0); + final int initDocs = scaledRandomIntBetween(0, 20); + int uncommittedDocs = 0; + for (int i = 0; i < initDocs; i++) { + shards.indexDocs(1); + uncommittedDocs++; + if (randomBoolean()) { + shards.syncGlobalCheckpoint(); + shards.flush(); + uncommittedDocs = 0; + } + } + shards.removeReplica(replica); + final int moreDocs = shards.indexDocs(scaledRandomIntBetween(0, 20)); + if (randomBoolean()) { + shards.flush(); + } + replica.close("test", randomBoolean()); + replica.store().close(); + final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + + try (Translog.Snapshot snapshot = newReplica.getTranslog().newSnapshot()) { + assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs)); + } + assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs)); + assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty()); + } + } + } From f28d3c104356c77587f697ed903dfa9d6cef76fb Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 13 Jan 2018 21:59:26 -0500 Subject: [PATCH 07/31] TEST: Tightens file-based condition in peer-recovery A replica always keeps a safe commit and starts peer recovery from that commit; file-based recovery only happens if new operations are added to the primary and the required translog is not fully retained. In the test, we tried to produce this condition by flushing a new commit in order to trim the whole translog. However, if the new global checkpoint is not persisted yet, we will keep two commits and not trim the translog. This commit tightens the file-based condition in the test by waiting for the global checkpoint to be persisted properly on the new primary before flushing.
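The gist of the fix, as a minimal sketch using the test's own names (the complete change is in the diff below): keep syncing until the persisted global checkpoint has caught up with max_seqno, and only then flush, so the deletion policy can drop the older commit and trim the translog.

```java
// Sketch: do not flush until the persisted global checkpoint equals max_seqno;
// otherwise the deletion policy keeps two commits and retains their translog.
assertBusy(() -> {
    shards.syncGlobalCheckpoint();
    assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(),
        equalTo(newPrimary.seqNoStats().getMaxSeqNo()));
});
newPrimary.flush(new FlushRequest()); // now only the safe commit is kept
```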
Closes #28209 Relates #28181 --- .../index/replication/RecoveryDuringReplicationTests.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 77576426252d9..aa97c2049915f 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -215,7 +215,6 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { } @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28209") public void testRecoveryAfterPrimaryPromotion() throws Exception { try (ReplicationGroup shards = createGroup(2)) { shards.startAll(); @@ -268,7 +267,12 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { ); newPrimary.indexSettings().updateIndexMetaData(builder.build()); newPrimary.onSettingsChanged(); - shards.syncGlobalCheckpoint(); + // Make sure the global checkpoint on the new primary is persisted properly, + // otherwise the deletion policy won't trim the translog + assertBusy(() -> { + shards.syncGlobalCheckpoint(); + assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); + }); newPrimary.flush(new FlushRequest()); uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); totalDocs += uncommittedOpsOnPrimary; From 8eb2e632103bba3760cbfb7512cf8ec357aeba00 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 12 Jan 2018 15:34:17 -0500 Subject: [PATCH 08/31] Add ability to associate an ID with tasks (#27764) Adds support for capturing the X-Opaque-Id header from a REST request and storing its value in the tasks that this request started. It works for all user-initiated tasks (not only search).
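For reference, the capture itself happens at task registration: the task manager copies the configured header names out of the thread context into every task it registers. A simplified sketch of that path (names follow the changes below; the exact registration code is not part of this excerpt):

```java
// Simplified sketch of TaskManager#register: copy configured REST headers
// (e.g. X-Opaque-Id) from the thread context into the newly created task.
Map<String, String> headers = new HashMap<>();
for (String name : taskHeaders) {                     // headers configured at node startup
    String value = threadPool.getThreadContext().getHeader(name);
    if (value != null) {
        headers.put(name, value);
    }
}
Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action,
    request.getParentTask(), headers);                // task now carries the headers
```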
Closes #23250 Usage: ``` $ curl -H "X-Opaque-Id: imotov" -H "foo:bar" "localhost:9200/_tasks?pretty&group_by=parents" { "tasks" : { "7qrTVbiDQKiZfubUP7DPkg:6998" : { "node" : "7qrTVbiDQKiZfubUP7DPkg", "id" : 6998, "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" : 1513029940042, "running_time_in_nanos" : 266794, "cancellable" : false, "headers" : { "X-Opaque-Id" : "imotov" }, "children" : [ { "node" : "V-PuCjPhRp2ryuEsNw6V1g", "id" : 6088, "type" : "netty", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513029940043, "running_time_in_nanos" : 67785, "cancellable" : false, "parent_task_id" : "7qrTVbiDQKiZfubUP7DPkg:6998", "headers" : { "X-Opaque-Id" : "imotov" } }, { "node" : "7qrTVbiDQKiZfubUP7DPkg", "id" : 6999, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513029940043, "running_time_in_nanos" : 98754, "cancellable" : false, "parent_task_id" : "7qrTVbiDQKiZfubUP7DPkg:6998", "headers" : { "X-Opaque-Id" : "imotov" } } ] } } } ``` --- docs/reference/cluster/tasks.asciidoc | 68 +++++++++++++++++++ .../reindex/AsyncBulkByScrollActionTests.java | 8 ++- .../TransportRethrottleActionTests.java | 9 ++- .../netty4/SimpleNetty4TransportTests.java | 2 +- .../test/old_cluster/10_basic.yml | 41 +++++++++++ .../test/upgraded_cluster/10_basic.yml | 39 +++++++++++ .../rest-api-spec/api/tasks.list.json | 2 +- .../test/tasks.list/10_basic.yml | 16 +++++ .../elasticsearch/action/ActionModule.java | 7 +- .../cancel/TransportCancelTasksAction.java | 2 +- .../node/tasks/list/ListTasksResponse.java | 15 ++++ .../action/index/IndexRequest.java | 2 +- .../action/search/SearchRequest.java | 5 +- .../action/search/SearchScrollRequest.java | 5 +- .../action/search/SearchTask.java | 6 +- .../replication/ReplicationRequest.java | 5 +- .../support/replication/ReplicationTask.java | 5 +- .../TransportReplicationAction.java | 5 +- .../client/transport/TransportClient.java | 2 +- .../reindex/AbstractBulkByScrollRequest.java | 5 +- .../index/reindex/BulkByScrollTask.java | 6 +- .../index/shard/PrimaryReplicaSyncer.java | 9 +-- .../java/org/elasticsearch/node/Node.java | 11 ++- .../elasticsearch/plugins/ActionPlugin.java | 7 ++ .../admin/cluster/RestListTasksAction.java | 13 +++- .../search/fetch/ShardFetchRequest.java | 5 +- .../internal/InternalScrollSearchRequest.java | 5 +- .../internal/ShardSearchTransportRequest.java | 5 +- .../search/query/QuerySearchRequest.java | 5 +- .../elasticsearch/tasks/CancellableTask.java | 5 +- .../java/org/elasticsearch/tasks/Task.java | 21 ++++-- .../elasticsearch/tasks/TaskAwareRequest.java | 6 +- .../org/elasticsearch/tasks/TaskInfo.java | 43 ++++++++++-- .../org/elasticsearch/tasks/TaskManager.java | 34 +++++++++- .../tasks/TaskResultsService.java | 19 +++++- .../transport/TransportService.java | 10 +-- .../tasks/task-index-mapping.json | 7 ++ .../node/tasks/CancellableTasksTests.java | 9 +-- .../node/tasks/TaskManagerTestCase.java | 15 ++-- .../admin/cluster/node/tasks/TaskTests.java | 5 +- .../admin/cluster/node/tasks/TasksIT.java | 61 ++++++++++++----- .../cluster/node/tasks/TestTaskPlugin.java | 19 ++++-- .../node/tasks/TransportTasksActionTests.java | 8 +-- .../action/bulk/TransportBulkActionTests.java | 2 +- .../bulk/TransportBulkActionTookTests.java | 3 +- .../action/main/MainActionTests.java | 3 +- .../action/search/MockSearchPhaseContext.java | 2 +- .../TransportMultiSearchActionTests.java | 25 ++++++- .../TransportActionFilterChainTests.java | 18 ++++- 
.../TransportBroadcastByNodeActionTests.java | 3 +- .../TransportMasterNodeActionTests.java | 2 +- .../nodes/TransportNodesActionTests.java | 2 +- .../BroadcastReplicationTests.java | 2 +- .../TransportReplicationActionTests.java | 6 +- .../TransportWriteActionTests.java | 6 +- ...ortInstanceSingleOperationActionTests.java | 4 +- .../client/node/NodeClientHeadersTests.java | 2 +- .../TransportClientNodesServiceTests.java | 2 +- .../cluster/NodeConnectionsServiceTests.java | 3 +- .../action/shard/ShardStateActionTests.java | 3 +- .../health/ClusterStateHealthTests.java | 3 +- .../discovery/ZenFaultDetectionTests.java | 2 +- .../discovery/zen/UnicastZenPingTests.java | 18 +++-- .../LeaderBulkByScrollTaskStateTests.java | 3 +- .../WorkerBulkByScrollTaskStateTests.java | 3 +- .../ESIndexLevelReplicationTestCase.java | 3 +- .../GlobalCheckpointSyncActionTests.java | 2 +- .../shard/PrimaryReplicaSyncerTests.java | 11 +-- .../indices/cluster/ClusterStateChanges.java | 3 +- ...ClusterStateServiceRandomUpdatesTests.java | 3 +- .../search/SearchServiceTests.java | 4 +- .../search/query/QueryPhaseTests.java | 17 ++--- .../tasks/ListTasksResponseTests.java | 7 +- .../elasticsearch/tasks/TaskResultTests.java | 4 +- .../TransportServiceHandshakeTests.java | 2 +- ...stractAsyncBulkByScrollActionTestCase.java | 4 +- .../java/org/elasticsearch/node/MockNode.java | 7 +- .../test/tasks/MockTaskManager.java | 6 +- .../test/transport/MockTransportService.java | 19 +++--- .../AbstractSimpleTransportTestCase.java | 8 ++- .../transport/MockTcpTransportTests.java | 2 +- .../nio/SimpleNioTransportTests.java | 2 +- 82 files changed, 622 insertions(+), 176 deletions(-) diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index ed73290883d23..b3457953f46e5 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -195,3 +195,71 @@ The following command will change the grouping to parent tasks: GET _tasks?group_by=parents -------------------------------------------------- // CONSOLE + +The grouping can be disabled by specifying `none` as a `group_by` parameter: + +[source,js] +-------------------------------------------------- +GET _tasks?group_by=none +-------------------------------------------------- +// CONSOLE + +[float] +=== Identifying running tasks + +The `X-Opaque-Id` header, when provided on the HTTP request, is returned as a header in the response as well as +in the `headers` field in the task information.
This allows tracking certain calls, or associating certain tasks with +the client that started them: + +[source,sh] +-------------------------------------------------- +curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" +-------------------------------------------------- +// NOTCONSOLE + +The result will look similar to the following: + +[source,js] +-------------------------------------------------- +HTTP/1.1 200 OK +X-Opaque-Id: 123456 <1> +content-type: application/json; charset=UTF-8 +content-length: 831 + +{ + "tasks" : { + "u5lcZHqcQhu-rUoFaqDphA:45" : { + "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 45, + "type" : "transport", + "action" : "cluster:monitor/tasks/lists", + "start_time_in_millis" : 1513823752749, + "running_time_in_nanos" : 293139, + "cancellable" : false, + "headers" : { + "X-Opaque-Id" : "123456" <2> + }, + "children" : [ + { + "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 46, + "type" : "direct", + "action" : "cluster:monitor/tasks/lists[n]", + "start_time_in_millis" : 1513823752750, + "running_time_in_nanos" : 92133, + "cancellable" : false, + "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", + "headers" : { + "X-Opaque-Id" : "123456" <3> + } + } + ] + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +<1> id as a part of the response header +<2> id for the task that was initiated by the REST request +<3> the child task of the task initiated by the REST request diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index a13bdea0ef2f4..db259de411165 100644 ---
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java @@ -32,6 +32,7 @@ import org.mockito.ArgumentCaptor; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.function.Consumer; @@ -53,7 +54,7 @@ public class TransportRethrottleActionTests extends ESTestCase { @Before public void createTask() { slices = between(2, 50); - task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID); + task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); task.setWorkerCount(slices); } @@ -101,7 +102,8 @@ public void testRethrottleSuccessfulResponse() { List sliceStatuses = new ArrayList<>(slices); for (int i = 0; i < slices; i++) { BulkByScrollTask.Status status = believeableInProgressStatus(i); - tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId()))); + tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId()), + Collections.emptyMap())); sliceStatuses.add(new BulkByScrollTask.StatusOrException(status)); } rethrottleTestCase(slices, @@ -121,7 +123,8 @@ public void testRethrottleWithSomeSucceeded() { List tasks = new ArrayList<>(); for (int i = succeeded; i < slices; i++) { BulkByScrollTask.Status status = believeableInProgressStatus(i); - tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId()))); + tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId()), + Collections.emptyMap())); sliceStatuses.add(new BulkByScrollTask.StatusOrException(status)); } rethrottleTestCase(slices - succeeded, diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index b2126b1b61185..efa296b6278af 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -73,7 +73,7 @@ protected Version getCurrentVersion() { } }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings); + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index d711831b30a2d..de9ad887326ec 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -356,3 +356,44 @@ field3: value - match: { hits.total: 1 } - match: { hits.hits.0._id: q3 } + +--- +"Create a task result record in the old cluster": + - do: + indices.create: + index: reindexed_index + body: + settings: + index: + number_of_replicas: 0 + - do: + bulk: + refresh: true + body: + - 
'{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"f1": "1"}' + - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"f1": "2"}' + - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"f1": "3"}' + - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"f1": "4"}' + - '{"index": {"_index": "reindexed_index", "_type": "doc"}}' + - '{"f1": "5"}' + + - do: + reindex: + wait_for_completion: false + body: + source: + index: reindexed_index + size: 1 + dest: + index: reindexed_index_copy + - match: {task: '/.+:\d+/'} + - set: {task: task} + + - do: + tasks.get: + wait_for_completion: true + task_id: $task diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 452fe443537ad..2a21bf738dc12 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -172,3 +172,42 @@ field3: value - match: { hits.total: 1 } - match: { hits.hits.0._id: q3 } + +--- +"Find a task result record from the old cluster": + - do: + search: + index: .tasks + body: + query: + match_all: {} + - match: { hits.total: 1 } + - match: { hits.hits.0._id: '/.+:\d+/' } + - set: {hits.hits.0._id: task_id} + + - do: + tasks.get: + wait_for_completion: true + task_id: $task_id + + - is_false: node_failures + - is_true: task + + - do: + headers: { "X-Opaque-Id": "Reindexing Again" } + reindex: + wait_for_completion: false + body: + source: + index: reindexed_index_copy + size: 1 + dest: + index: reindexed_index_another_copy + - match: { task: '/.+:\d+/' } + - set: { task: task_id } + + - do: + tasks.get: + wait_for_completion: true + task_id: $task_id + - match: { task.headers.X-Opaque-Id: "Reindexing Again" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index fbe355ee164b0..1110c3c111b99 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -34,7 +34,7 @@ "group_by": { "type" : "enum", "description": "Group tasks by nodes or parent/child relationships", - "options" : ["nodes", "parents"], + "options" : ["nodes", "parents", "none"], "default" : "nodes" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index dd1c415876fa7..57bf5b629b76a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -17,3 +17,19 @@ group_by: parents - is_true: tasks + +--- +"tasks_list headers": + - skip: + version: " - 6.99.99" + reason: task headers has been added in 7.0.0 + + - do: + headers: { "X-Opaque-Id": "That is me" } + tasks.list: + actions: "cluster:monitor/tasks/lists" + group_by: none + + - is_true: tasks + - match: { tasks.0.headers.X-Opaque-Id: "That is me" } + diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 28fd3458b902a..872c217f98091 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -312,6 
+312,7 @@ import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; @@ -324,6 +325,7 @@ import java.util.function.Supplier; import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableMap; @@ -362,7 +364,10 @@ public ActionModule(boolean transportClient, Settings settings, IndexNameExpress actionFilters = setupActionFilters(actionPlugins); autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver); destructiveOperations = new DestructiveOperations(settings, clusterSettings); - Set headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet()); + Set headers = Stream.concat( + actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()), + Stream.of("X-Opaque-Id") + ).collect(Collectors.toSet()); UnaryOperator restWrapper = null; for (ActionPlugin plugin : actionPlugins) { UnaryOperator newRestWrapper = plugin.getRestHandlerWrapper(threadPool.getThreadContext()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index aca1be7adff4c..0bd1ff2945bd7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -56,7 +56,7 @@ * Transport action that can be used to cancel currently running cancellable tasks. *

* For a task to be cancellable it has to return an instance of - * {@link CancellableTask} from {@link TransportRequest#createTask(long, String, String, TaskId)} + * {@link CancellableTask} from {@link TransportRequest#createTask} */ public class TransportCancelTasksAction extends TransportTasksAction { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index de5fcf9345d23..88d8ff4679917 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -186,6 +186,21 @@ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Param return builder; } + /** + * Presents a flat list of tasks + */ + public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException { + toXContentCommon(builder, params); + builder.startArray("tasks"); + for (TaskInfo taskInfo : getTasks()) { + builder.startObject(); + taskInfo.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + return builder; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 59a0b5e198087..46d51ee0b40e4 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -77,7 +77,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement /** * Max length of the source document to include into toString() * - * @see ReplicationRequest#createTask(long, java.lang.String, java.lang.String, org.elasticsearch.tasks.TaskId) + * @see ReplicationRequest#createTask */ static final int MAX_SOURCE_LENGTH_IN_TOSTRING = 2048; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index e658f7f69c456..9be4b9b4fb75e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -382,9 +383,9 @@ public boolean isSuggestOnly() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { // generating description in a lazy way since source can be quite big - return new SearchTask(id, type, action, null, parentTaskId) { + return new SearchTask(id, type, action, null, parentTaskId, headers) { @Override public String getDescription() { StringBuilder sb = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index fbe648cceaa80..1dc5c8e56798d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -32,6 +32,7 @@ import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -113,8 +114,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new SearchTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java index d0a1cdd456f47..699448909a2b5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java @@ -22,13 +22,15 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; +import java.util.Map; + /** * Task storing information about a currently running search request. */ public class SearchTask extends CancellableTask { - public SearchTask(long id, String type, String action, String description, TaskId parentTaskId) { - super(id, type, action, description, parentTaskId); + public SearchTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + super(id, type, action, description, parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 07822bebaa9ac..81584a7bb6467 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -35,6 +35,7 @@ import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -207,8 +208,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new ReplicationTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new ReplicationTask(id, type, action, getDescription(), parentTaskId, headers); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java index 2e0baa057b223..1cf8b8bf0ff68 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java @@ -27,6 +27,7 @@ import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import static java.util.Objects.requireNonNull; @@ -36,8 +37,8 @@ public class ReplicationTask extends Task { private volatile String phase = "starting"; - public ReplicationTask(long id, String type, String action, String description, TaskId 
parentTaskId) { - super(id, type, action, description, parentTaskId); + public ReplicationTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + super(id, type, action, description, parentTaskId, headers); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index a63d14d7f9d12..1a57b6a5d9500 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -81,6 +81,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; @@ -1228,8 +1229,8 @@ public TaskId getParentTask() { return request.getParentTask(); } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return request.createTask(id, type, action, parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return request.createTask(id, type, action, parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 6a93074172e19..474374af94558 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -179,7 +179,7 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings final TransportService transportService = new TransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), boundTransportAddress -> DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 0), - UUIDs.randomBase64UUID()), null); + UUIDs.randomBase64UUID()), null, Collections.emptySet()); modules.add((b -> { b.bind(BigArrays.class).toInstance(bigArrays); b.bind(PluginsService.class).toInstance(pluginsService); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 7af59e8ce3c84..c45e0c62d5c39 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -408,8 +409,8 @@ protected Self doForSlice(Self request, TaskId slicingTask, int totalSlices) { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new BulkByScrollTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new BulkByScrollTask(id, type, action, getDescription(), parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java 
b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index d5e656489558c..276484b055253 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.CancellableTask; @@ -38,6 +37,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; import static java.lang.Math.min; @@ -62,8 +62,8 @@ public class BulkByScrollTask extends CancellableTask { private volatile LeaderBulkByScrollTaskState leaderState; private volatile WorkerBulkByScrollTaskState workerState; - public BulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId) { - super(id, type, action, description, parentTaskId); + public BulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + super(id, type, action, description, parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index b1bd1c5b3138e..1e31eae7d417f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -45,6 +45,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -271,8 +272,8 @@ public ResyncRequest(ShardId shardId, String allocationId) { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new ResyncTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new ResyncTask(id, type, action, getDescription(), parentTaskId, headers); } @Override @@ -297,8 +298,8 @@ public static class ResyncTask extends Task { private volatile int resyncedOperations; private volatile int skippedOperations; - public ResyncTask(long id, String type, String action, String description, TaskId parentTaskId) { - super(id, type, action, description, parentTaskId); + public ResyncTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + super(id, type, action, description, parentTaskId, headers); } /** diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index df29b93f35e44..02d6c205831fa 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -156,6 +156,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -424,8 +425,12 @@ protected Node(final Environment environment, Collection 
metaDataIndexUpgradeService, metaDataUpgrader);
         new TemplateUpgradeService(settings, client, clusterService, threadPool, indexTemplateMetaDataUpgraders);
         final Transport transport = networkModule.getTransportSupplier().get();
+        Set<String> taskHeaders = Stream.concat(
+            pluginsService.filterPlugins(ActionPlugin.class).stream().flatMap(p -> p.getTaskHeaders().stream()),
+            Stream.of("X-Opaque-Id")
+        ).collect(Collectors.toSet());
         final TransportService transportService = newTransportService(settings, transport, threadPool,
-            networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings());
+            networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders);
         final ResponseCollectorService responseCollectorService = new ResponseCollectorService(this.settings, clusterService);
         final SearchTransportService searchTransportService = new SearchTransportService(settings, transportService,
             SearchExecutionStatsCollector.makeWrapper(responseCollectorService));
@@ -543,8 +548,8 @@ static void warnIfPreRelease(final Version version, final boolean isSnapshot, final Logger logger) {
 
     protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool,
                                                    TransportInterceptor interceptor,
                                                    Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
-                                                   ClusterSettings clusterSettings) {
-        return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings);
+                                                   ClusterSettings clusterSettings, Set<String> taskHeaders) {
+        return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders);
     }
 
     protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) {
diff --git a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java
index 377da56f6018b..41f0ed86116ad 100644
--- a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java
+++ b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java
@@ -84,6 +84,13 @@ default Collection<String> getRestHeaders() {
         return Collections.emptyList();
     }
 
+    /**
+     * Returns headers which should be copied from internal requests into tasks.
+     */
+    default Collection<String> getTaskHeaders() {
+        return Collections.emptyList();
+    }
+
     /**
      * Returns a function used to wrap each rest request before handling the request.
      *
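To make the new extension point concrete: a plugin opts into header tracking by overriding getTaskHeaders(). The sketch below is illustrative only; the plugin class and the X-Trace-Id header are hypothetical, and only the getTaskHeaders() hook itself comes from this change.

    import java.util.Collection;
    import java.util.Collections;

    import org.elasticsearch.plugins.ActionPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class TracingPlugin extends Plugin implements ActionPlugin {
        @Override
        public Collection<String> getTaskHeaders() {
            // Every task registered on this node will copy this header from the
            // request's thread context, if the request carried it.
            return Collections.singleton("X-Trace-Id");
        }
    }

Note that X-Opaque-Id is always tracked (it is concatenated into the set in Node.java above), whether or not any plugin asks for additional headers.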
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
index 6ef5d5a2de2bf..8e6447e0e4980 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
@@ -103,10 +103,21 @@ public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception {
                     return new BytesRestResponse(RestStatus.OK, builder);
                 }
             };
+        } else if ("none".equals(groupBy)) {
+            return new RestBuilderListener<T>(channel) {
+                @Override
+                public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception {
+                    builder.startObject();
+                    response.toXContentGroupedByNone(builder, channel.request());
+                    builder.endObject();
+                    return new BytesRestResponse(RestStatus.OK, builder);
+                }
+            };
         } else if ("parents".equals(groupBy)) {
             return new RestToXContentListener<>(channel);
         } else {
-            throw new IllegalArgumentException("[group_by] must be one of [nodes] or [parents] but was [" + groupBy + "]");
+            throw new IllegalArgumentException("[group_by] must be one of [nodes], [parents] or [none] but was [" + groupBy + "]");
         }
     }
 
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java
index dcea42e5ecb7f..7967650797aed 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.transport.TransportRequest;
 
 import java.io.IOException;
+import java.util.Map;
 
 /**
  * Shard level fetch base request. Holds all the info needed to execute a fetch.
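With the extra branch above, clients can ask for an ungrouped listing. An illustrative request and abbreviated response follow; the node id, task id and header value are made up, and the trailing "..." stands for the remaining task fields:

    GET /_tasks?group_by=none

    {
      "tasks": [
        {
          "node": "oTUltX4IQMOUUVeiohTt8A",
          "id": 124,
          "type": "direct",
          "action": "cluster:monitor/tasks/lists[n]",
          "headers": {
            "X-Opaque-Id": "my_id"
          },
          ...
        }
      ]
    }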
@@ -111,8 +112,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new SearchTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java index f112c97dd0f63..bfd2364acc4e7 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java @@ -29,6 +29,7 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; +import java.util.Map; public class InternalScrollSearchRequest extends TransportRequest { @@ -72,8 +73,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new SearchTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 1c2ac0e4d179c..75d0f34bcd450 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; +import java.util.Map; /** * Shard level search request that represents an actual search sent from the coordinating node to the nodes holding @@ -174,8 +175,8 @@ public boolean isProfile() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new SearchTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java index 86a9c70dc0be1..ed2ac5e6f6b8d 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java @@ -32,6 +32,7 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.search.dfs.AggregatedDfs.readAggregatedDfs; @@ -87,8 +88,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new SearchTask(id, type, action, getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchTask(id, type, action, getDescription(), 
parentTaskId, headers);
     }
 
     public String getDescription() {
diff --git a/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java b/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java
index 685e9bcf35251..1d43076305ccd 100644
--- a/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java
+++ b/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java
@@ -21,6 +21,7 @@
 
 import org.elasticsearch.common.Nullable;
 
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
@@ -30,8 +31,8 @@ public abstract class CancellableTask extends Task {
 
     private final AtomicReference<String> reason = new AtomicReference<>();
 
-    public CancellableTask(long id, String type, String action, String description, TaskId parentTaskId) {
-        super(id, type, action, description, parentTaskId);
+    public CancellableTask(long id, String type, String action, String description, TaskId parentTaskId, Map<String, String> headers) {
+        super(id, type, action, description, parentTaskId, headers);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/tasks/Task.java b/server/src/main/java/org/elasticsearch/tasks/Task.java
index e59970b84ee47..9fd9019cd213c 100644
--- a/server/src/main/java/org/elasticsearch/tasks/Task.java
+++ b/server/src/main/java/org/elasticsearch/tasks/Task.java
@@ -27,6 +27,7 @@
 import org.elasticsearch.common.xcontent.ToXContentObject;
 
 import java.io.IOException;
+import java.util.Map;
 
 /**
  * Current task information
@@ -43,6 +44,8 @@ public class Task {
 
     private final TaskId parentTask;
 
+    private final Map<String, String> headers;
+
     /**
      * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style).
     */
@@ -53,11 +56,12 @@ public class Task {
      */
     private final long startTimeNanos;
 
-    public Task(long id, String type, String action, String description, TaskId parentTask) {
-        this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime());
+    public Task(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers) {
+        this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers);
     }
 
-    public Task(long id, String type, String action, String description, TaskId parentTask, long startTime, long startTimeNanos) {
+    public Task(long id, String type, String action, String description, TaskId parentTask, long startTime, long startTimeNanos,
+                Map<String, String> headers) {
         this.id = id;
         this.type = type;
         this.action = action;
@@ -65,6 +69,7 @@ public Task(long id, String type, String action, String description, TaskId parentTask, long startTime, long startTimeNanos) {
         this.parentTask = parentTask;
         this.startTime = startTime;
         this.startTimeNanos = startTimeNanos;
+        this.headers = headers;
     }
 
     /**
@@ -92,7 +97,7 @@ public final TaskInfo taskInfo(String localNodeId, boolean detailed) {
      */
     protected final TaskInfo taskInfo(String localNodeId, String description, Status status) {
         return new TaskInfo(new TaskId(localNodeId, getId()), getType(), getAction(), description, status, startTime,
-            System.nanoTime() - startTimeNanos, this instanceof CancellableTask, parentTask);
+            System.nanoTime() - startTimeNanos, this instanceof CancellableTask, parentTask, headers);
     }
 
     /**
@@ -149,6 +154,14 @@ public Status getStatus() {
 
     public interface Status extends ToXContentObject, NamedWriteable {}
 
+    /**
+     * Returns the stored task header with the given name, or {@code null} if no such header was recorded for this task.
+     */
+    public String getHeader(String header) {
+        return headers.get(header);
+    }
+
     public TaskResult result(DiscoveryNode node, Exception error) throws IOException {
         return new TaskResult(taskInfo(node.getId(), true), error);
     }
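The new accessor is how action code can read a propagated header back off its task. For example (a hypothetical fragment, assuming a task variable and a logger in scope):

    // Returns null when the client did not send the header.
    String opaqueId = task.getHeader("X-Opaque-Id");
    if (opaqueId != null) {
        logger.debug("task [{}] belongs to client request [{}]", task.getId(), opaqueId);
    }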
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java b/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java
index a2364ac8e4047..86ba59ebcc804 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.tasks;
 
+import java.util.Map;
+
 /**
  * An interface for a request that can be used to register a task manager task
 */
@@ -47,8 +49,8 @@ default void setParentTask(String parentTaskNode, long parentTaskId) {
      * A request can override this method and return null to avoid being tracked by the task
      * manager.
      */
-    default Task createTask(long id, String type, String action, TaskId parentTaskId) {
-        return new Task(id, type, action, getDescription(), parentTaskId);
+    default Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+        return new Task(id, type, action, getDescription(), parentTaskId, headers);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
index d0fd66703e09e..19e9baedd753b 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.tasks;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -31,6 +32,8 @@
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 
@@ -65,8 +68,10 @@ public final class TaskInfo implements Writeable, ToXContentFragment {
 
     private final TaskId parentTaskId;
 
+    private final Map<String, String> headers;
+
     public TaskInfo(TaskId taskId, String type, String action, String description, Task.Status status, long startTime,
-                    long runningTimeNanos, boolean cancellable, TaskId parentTaskId) {
+                    long runningTimeNanos, boolean cancellable, TaskId parentTaskId, Map<String, String> headers) {
         this.taskId = taskId;
         this.type = type;
         this.action = action;
@@ -76,6 +81,7 @@ public TaskInfo(TaskId taskId, String type, String action, String description, Task.Status status, long startTime,
         this.runningTimeNanos = runningTimeNanos;
         this.cancellable = cancellable;
         this.parentTaskId = parentTaskId;
+        this.headers = headers;
     }
 
     /**
@@ -91,6 +97,11 @@ public TaskInfo(StreamInput in) throws IOException {
         runningTimeNanos = in.readLong();
         cancellable = in.readBoolean();
         parentTaskId = TaskId.readFromStream(in);
+        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
+            headers = in.readMap(StreamInput::readString, StreamInput::readString);
+        } else {
+            headers = Collections.emptyMap();
+        }
     }
 
     @Override
@@ -104,6 +115,9 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(runningTimeNanos);
         out.writeBoolean(cancellable);
         parentTaskId.writeTo(out);
+        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
+            out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
+        }
     }
 
     public TaskId getTaskId() {
@@ -162,6 +176,13 @@ public TaskId getParentTaskId() {
         return parentTaskId;
     }
 
+    /**
+     * Returns the task headers
+     */
+    public Map<String, String> getHeaders() {
+        return headers;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.field("node", taskId.getNodeId());
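The stream changes above are version-gated: headers only go on the wire when the stream version is 6.2.0 or later, and a reader on the new code fills in an empty map otherwise. A rough round-trip sketch of that behaviour (test-style; assumes a taskInfo built with headers, and BytesStreamOutput as the usual in-memory harness):

    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_6_1_0);            // simulate sending to an older node
    taskInfo.writeTo(out);                      // headers are skipped
    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_6_1_0);
    assert new TaskInfo(in).getHeaders().isEmpty();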
@@ -180,6 +201,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (parentTaskId.isSet()) {
             builder.field("parent_task_id", parentTaskId.toString());
         }
+        builder.startObject("headers");
+        for (Map.Entry<String, String> attribute : headers.entrySet()) {
+            builder.field(attribute.getKey(), attribute.getValue());
+        }
+        builder.endObject();
         return builder;
     }
 
@@ -195,10 +221,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             long runningTimeNanos = (Long) a[i++];
             boolean cancellable = (Boolean) a[i++];
             String parentTaskIdString = (String) a[i++];
-
+            @SuppressWarnings("unchecked") Map<String, String> headers = (Map<String, String>) a[i++];
+            if (headers == null) {
+                // This might happen if we are reading an old version of task info
+                headers = Collections.emptyMap();
+            }
             RawTaskStatus status = statusBytes == null ? null : new RawTaskStatus(statusBytes);
             TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId(parentTaskIdString);
-            return new TaskInfo(id, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId);
+            return new TaskInfo(id, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId,
+                headers);
         });
     static {
         // Note for the future: this has to be backwards and forwards compatible with all changes to the task storage format
@@ -212,6 +243,7 @@
         PARSER.declareLong(constructorArg(), new ParseField("running_time_in_nanos"));
         PARSER.declareBoolean(constructorArg(), new ParseField("cancellable"));
         PARSER.declareString(optionalConstructorArg(), new ParseField("parent_task_id"));
+        PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.mapStrings(), new ParseField("headers"));
     }
 
     @Override
@@ -234,11 +266,12 @@ public boolean equals(Object obj) {
             && Objects.equals(runningTimeNanos, other.runningTimeNanos)
             && Objects.equals(parentTaskId, other.parentTaskId)
             && Objects.equals(cancellable, other.cancellable)
-            && Objects.equals(status, other.status);
+            && Objects.equals(status, other.status)
+            && Objects.equals(headers, other.headers);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(taskId, type, action, description, startTime, runningTimeNanos, parentTaskId, cancellable, status);
+        return Objects.hash(taskId, type, action, description, startTime, runningTimeNanos, parentTaskId, cancellable, status, headers);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java
index afeeeeedd1168..16212e066bbff 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java
@@ -32,19 +32,26 @@
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
 import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
 
 /**
  * Task Manager service for keeping track of currently running tasks on the nodes
@@ -52,6 +59,10 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplier {
 
     private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
 
+    /** REST headers that are copied onto the task at registration */
+    private final List<String> taskHeaders;
+
+    private final ThreadPool threadPool;
+
     private final ConcurrentMapLong<Task> tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
 
     private final ConcurrentMapLong<CancellableTask> cancellableTasks = ConcurrentCollections
@@ -65,8 +76,13 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplier {
 
     private DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES;
 
-    public TaskManager(Settings settings) {
+    private final ByteSizeValue maxHeaderSize;
+
+    public TaskManager(Settings settings, ThreadPool threadPool, Set<String> taskHeaders) {
         super(settings);
+        this.threadPool = threadPool;
+        this.taskHeaders = new ArrayList<>(taskHeaders);
+        this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
     }
 
     public void setTaskResultsService(TaskResultsService taskResultsService) {
@@ -80,7 +96,21 @@ public void setTaskResultsService(TaskResultsService taskResultsService) {
      * Returns the task manager tracked task or null if the task doesn't support the task manager
     */
     public Task register(String type, String action, TaskAwareRequest request) {
-        Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask());
+        Map<String, String> headers = new HashMap<>();
+        long headerSize = 0;
+        long maxSize = maxHeaderSize.getBytes();
+        ThreadContext threadContext = threadPool.getThreadContext();
+        for (String key : taskHeaders) {
+            String httpHeader = threadContext.getHeader(key);
+            if (httpHeader != null) {
+                // budget the copied headers against the HTTP max header size,
+                // assuming the worst case of two bytes per char for key and value
+                headerSize += key.length() * 2 + httpHeader.length() * 2;
+                if (headerSize > maxSize) {
+                    throw new IllegalArgumentException("Request exceeded the maximum size of task headers " + maxHeaderSize);
+                }
+                headers.put(key, httpHeader);
+            }
+        }
+        Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask(), headers);
         if (task == null) {
             return null;
         }
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
index 69549c611f1e6..f661095d6bd47 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
@@ -34,6 +34,7 @@
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -48,6 +49,7 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Map;
 
 /**
  * Service that can store task results.
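In short, register() snapshots the tracked headers from the calling thread's context at the moment the task is created, rejecting requests whose copied headers would exceed http.max_header_size. A sketch of the flow (assuming threadPool, taskManager and a TaskAwareRequest request are in scope; the header value is made up):

    ThreadContext ctx = threadPool.getThreadContext();
    try (ThreadContext.StoredContext ignored = ctx.stashContext()) {
        ctx.putHeader("X-Opaque-Id", "my-request-42");   // normally set by the REST layer
        Task task = taskManager.register("transport", "indices:data/read/search", request);
        assert "my-request-42".equals(task.getHeader("X-Opaque-Id"));
    }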
@@ -60,6 +62,10 @@ public class TaskResultsService extends AbstractComponent { public static final String TASK_RESULT_INDEX_MAPPING_FILE = "task-index-mapping.json"; + public static final String TASK_RESULT_MAPPING_VERSION_META_FIELD = "version"; + + public static final int TASK_RESULT_MAPPING_VERSION = 2; + private final Client client; private final ClusterService clusterService; @@ -109,7 +115,7 @@ public void onFailure(Exception e) { }); } else { IndexMetaData metaData = state.getMetaData().index(TASK_INDEX); - if (metaData.getMappings().containsKey(TASK_TYPE) == false) { + if (getTaskResultMappingVersion(metaData) < TASK_RESULT_MAPPING_VERSION) { // The index already exists but doesn't have our mapping client.admin().indices().preparePutMapping(TASK_INDEX).setType(TASK_TYPE) .setSource(taskResultIndexMapping(), XContentType.JSON) @@ -131,6 +137,17 @@ public void onFailure(Exception e) { } } + private int getTaskResultMappingVersion(IndexMetaData metaData) { + MappingMetaData mappingMetaData = metaData.getMappings().get(TASK_TYPE); + if (mappingMetaData == null) { + return 0; + } + @SuppressWarnings("unchecked") Map meta = (Map) mappingMetaData.sourceAsMap().get("_meta"); + if (meta == null || meta.containsKey(TASK_RESULT_MAPPING_VERSION_META_FIELD) == false) { + return 1; // The mapping was created before meta field was introduced + } + return (int) meta.get(TASK_RESULT_MAPPING_VERSION_META_FIELD); + } private void doStoreResult(TaskResult taskResult, ActionListener listener) { IndexRequestBuilder index = client.prepareIndex(TASK_INDEX, TASK_TYPE, taskResult.getTask().getTaskId().toString()); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 3e226ca772bad..b53327b507dc5 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -59,6 +59,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -147,7 +148,8 @@ public void close() throws IOException { * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
*/ public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, - Function localNodeFactory, @Nullable ClusterSettings clusterSettings) { + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, + Set taskHeaders) { super(settings); this.transport = transport; this.threadPool = threadPool; @@ -156,7 +158,7 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); - taskManager = createTaskManager(); + taskManager = createTaskManager(settings, threadPool, taskHeaders); this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.connectToRemoteCluster = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings); @@ -182,8 +184,8 @@ public TaskManager getTaskManager() { return taskManager; } - protected TaskManager createTaskManager() { - return new TaskManager(settings); + protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { + return new TaskManager(settings, threadPool, taskHeaders); } /** diff --git a/server/src/main/resources/org/elasticsearch/tasks/task-index-mapping.json b/server/src/main/resources/org/elasticsearch/tasks/task-index-mapping.json index 0f1a32e1bef81..435e6c5759cbb 100644 --- a/server/src/main/resources/org/elasticsearch/tasks/task-index-mapping.json +++ b/server/src/main/resources/org/elasticsearch/tasks/task-index-mapping.json @@ -1,5 +1,8 @@ { "task" : { + "_meta": { + "version": 2 + }, "dynamic" : "strict", "properties" : { "completed": { @@ -37,6 +40,10 @@ }, "description": { "type": "text" + }, + "headers": { + "type" : "object", + "enabled" : false } } }, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index c28fddf68ad72..6b2e2040bca80 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -45,6 +45,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -91,8 +92,8 @@ public String getDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new CancellableTask(id, type, action, getDescription(), parentTaskId) { + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { @Override public boolean shouldCancelChildrenOnCancellation() { return false; @@ -131,8 +132,8 @@ public String getDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new CancellableTask(id, type, action, getDescription(), parentTaskId) { + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { @Override public boolean 
shouldCancelChildrenOnCancellation() { return true; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 8927fed567ed9..62313d01b95c3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -56,9 +56,11 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; @@ -175,15 +177,16 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { }; transportService = new TransportService(settings, new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), - new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - new NetworkService(Collections.emptyList())), - threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null) { + new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), + new NetworkService(Collections.emptyList())), + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null, + Collections.emptySet()) { @Override - protected TaskManager createTaskManager() { + protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { - return new MockTaskManager(settings); + return new MockTaskManager(settings, threadPool, taskHeaders); } else { - return super.createTaskManager(); + return super.createTaskManager(settings, threadPool, taskHeaders); } } }; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskTests.java index c5d8b39c3da39..8628a8ee2c391 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.ESTestCase; import java.nio.charset.StandardCharsets; +import java.util.Collections; import java.util.Map; public class TaskTests extends ESTestCase { @@ -36,7 +37,8 @@ public void testTaskInfoToString() { long runningTime = randomNonNegativeLong(); boolean cancellable = randomBoolean(); TaskInfo taskInfo = new TaskInfo(new TaskId(nodeId, taskId), "test_type", - "test_action", "test_description", null, startTime, runningTime, cancellable, TaskId.EMPTY_TASK_ID); + "test_action", "test_description", null, startTime, runningTime, cancellable, TaskId.EMPTY_TASK_ID, + Collections.singletonMap("foo", "bar")); String taskInfoString = taskInfo.toString(); Map map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2(); assertEquals(((Number)map.get("id")).longValue(), taskId); @@ -46,6 +48,7 @@ public void testTaskInfoToString() { assertEquals(((Number)map.get("start_time_in_millis")).longValue(), startTime); assertEquals(((Number)map.get("running_time_in_nanos")).longValue(), runningTime); 
assertEquals(map.get("cancellable"), cancellable); + assertEquals(map.get("headers"), Collections.singletonMap("foo", "bar")); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 778930e7d05ac..b04205ed01813 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -84,6 +84,7 @@ import static java.util.Collections.singleton; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; @@ -355,19 +356,26 @@ public void testSearchTaskDescriptions() { client().prepareIndex("test", "doc", "test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - assertSearchResponse(client().prepareSearch("test").setTypes("doc").setQuery(QueryBuilders.matchAllQuery()).get()); + Map headers = new HashMap<>(); + headers.put("X-Opaque-Id", "my_id"); + headers.put("Foo-Header", "bar"); + headers.put("Custom-Task-Header", "my_value"); + assertSearchResponse( + client().filterWithHeader(headers).prepareSearch("test").setTypes("doc").setQuery(QueryBuilders.matchAllQuery()).get()); // the search operation should produce one main task List mainTask = findEvents(SearchAction.NAME, Tuple::v1); assertEquals(1, mainTask.size()); assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], types[doc], search_type[")); assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\"")); + assertTaskHeaders(mainTask.get(0)); // check that if we have any shard-level requests they all have non-zero length description List shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1); for (TaskInfo taskInfo : shardTasks) { assertThat(taskInfo.getParentTaskId(), notNullValue()); assertEquals(mainTask.get(0).getTaskId(), taskInfo.getParentTaskId()); + assertTaskHeaders(taskInfo); switch (taskInfo.getAction()) { case SearchTransportService.QUERY_ACTION_NAME: case SearchTransportService.DFS_ACTION_NAME: @@ -392,6 +400,25 @@ public void testSearchTaskDescriptions() { } + public void testSearchTaskHeaderLimit() { + int maxSize = Math.toIntExact(SETTING_HTTP_MAX_HEADER_SIZE.getDefault(Settings.EMPTY).getBytes() / 2 + 1); + + Map headers = new HashMap<>(); + headers.put("X-Opaque-Id", "my_id"); + headers.put("Custom-Task-Header", randomAlphaOfLengthBetween(maxSize, maxSize + 100)); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> client().filterWithHeader(headers).admin().cluster().prepareListTasks().get() + ); + assertThat(ex.getMessage(), startsWith("Request exceeded the maximum size of task headers ")); + } + + private void assertTaskHeaders(TaskInfo taskInfo) { + assertThat(taskInfo.getHeaders().keySet(), hasSize(2)); + assertEquals("my_id", taskInfo.getHeaders().get("X-Opaque-Id")); + assertEquals("my_value", taskInfo.getHeaders().get("Custom-Task-Header")); + } + /** * Very basic "is it plugged in" 
style test that indexes a document and makes sure that you can fetch the status of the process. The * goal here is to verify that the large moving parts that make fetching task status work fit together rather than to verify any @@ -802,24 +829,24 @@ public void testNodeNotFoundButTaskFound() throws Exception { // Save a fake task that looks like it is from a node that isn't part of the cluster CyclicBarrier b = new CyclicBarrier(2); TaskResultsService resultsService = internalCluster().getInstance(TaskResultsService.class); - resultsService.storeResult( - new TaskResult(new TaskInfo(new TaskId("fake", 1), "test", "test", "", null, 0, 0, false, TaskId.EMPTY_TASK_ID), - new RuntimeException("test")), - new ActionListener() { - @Override - public void onResponse(Void response) { - try { - b.await(); - } catch (InterruptedException | BrokenBarrierException e) { - onFailure(e); - } + resultsService.storeResult(new TaskResult( + new TaskInfo(new TaskId("fake", 1), "test", "test", "", null, 0, 0, false, TaskId.EMPTY_TASK_ID, Collections.emptyMap()), + new RuntimeException("test")), + new ActionListener() { + @Override + public void onResponse(Void response) { + try { + b.await(); + } catch (InterruptedException | BrokenBarrierException e) { + onFailure(e); } + } - @Override - public void onFailure(Exception e) { - throw new RuntimeException(e); - } - }); + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }); b.await(); // Now we can find it! diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 88674bfec74d8..5bf000a17bac7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -59,9 +59,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import static org.elasticsearch.test.ESTestCase.awaitBusy; @@ -76,12 +78,17 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin { new ActionHandler<>(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class)); } + @Override + public Collection getTaskHeaders() { + return Collections.singleton("Custom-Task-Header"); + } + static class TestTask extends CancellableTask { private volatile boolean blocked = true; - TestTask(long id, String type, String action, String description, TaskId parentTaskId) { - super(id, type, action, description, parentTaskId); + TestTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + super(id, type, action, description, parentTaskId, headers); } @Override @@ -178,8 +185,8 @@ public String getDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new TestTask(id, type, action, this.getDescription(), parentTaskId); + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new TestTask(id, type, action, this.getDescription(), parentTaskId, headers); } } @@ -247,8 +254,8 @@ public String getDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new CancellableTask(id, type, action, 
getDescription(), parentTaskId) { + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { @Override public boolean shouldCancelChildrenOnCancellation() { return true; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index d0d5be5b4178d..2fb23b26709bd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -109,9 +109,9 @@ public String getDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { if (enableTaskManager) { - return super.createTask(id, type, action, parentTaskId); + return super.createTask(id, type, action, parentTaskId, headers); } else { return null; } @@ -156,9 +156,9 @@ public String getDescription() { } @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId) { + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { if (enableTaskManager) { - return super.createTask(id, type, action, parentTaskId); + return super.createTask(id, type, action, parentTaskId, headers); } else { return null; } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 5141b9cd47187..3bd66af1bab05 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -82,7 +82,7 @@ public void setUp() throws Exception { CapturingTransport capturingTransport = new CapturingTransport(); transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> clusterService.localNode(), null); + boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); bulkAction = new TestTransportBulkAction(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index e35f98e220e03..af8289f0c45b1 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -49,6 +49,7 @@ import org.junit.BeforeClass; import java.nio.charset.StandardCharsets; +import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -92,7 +93,7 @@ private TransportBulkAction createAction(boolean controlled, AtomicLong expected CapturingTransport capturingTransport = new CapturingTransport(); TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> clusterService.localNode(), null); + boundAddress -> 
clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY); diff --git a/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java index 6cc0afa3fadf2..34f9bc15ecfa6 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; import java.util.concurrent.atomic.AtomicReference; import static org.mockito.Mockito.mock; @@ -68,7 +69,7 @@ public void testMainActionClusterAvailable() { when(clusterService.state()).thenReturn(state); TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null); + x -> null, null, Collections.emptySet()); TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), transportService, mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService); AtomicReference responseRef = new AtomicReference<>(); diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 80fbd4cc43ddf..b0ac2ed5fa0d3 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -73,7 +73,7 @@ public Logger getLogger() { @Override public SearchTask getTask() { - return new SearchTask(0, "n/a", "n/a", "test", null); + return new SearchTask(0, "n/a", "n/a", "test", null, Collections.emptyMap()); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index e811da82c47a8..6b6bbc4ae98a4 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -35,8 +35,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -54,6 +57,22 @@ public class TransportMultiSearchActionTests extends ESTestCase { + protected ThreadPool threadPool; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + } + + @After + @Override + public void tearDown() throws Exception { + threadPool.shutdown(); + super.tearDown(); + } + public void testBatchExecute() throws Exception { // Initialize dependencies of TransportMultiSearchAction Settings settings = Settings.builder() @@ -63,8 +82,10 @@ public void testBatchExecute() throws Exception { when(actionFilters.filters()).thenReturn(new ActionFilter[0]); ThreadPool threadPool = new ThreadPool(settings); TaskManager taskManager = 
mock(TaskManager.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null) { + TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, + Collections.emptySet()) { @Override public TaskManager getTaskManager() { return taskManager; diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 3eb1616348d84..d576d440c0263 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -25,12 +25,16 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; import org.junit.Before; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; @@ -48,10 +52,17 @@ public class TransportActionFilterChainTests extends ESTestCase { private AtomicInteger counter; + private ThreadPool threadPool; @Before public void init() throws Exception { counter = new AtomicInteger(); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build()); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); } public void testActionFiltersRequest() throws ExecutionException, InterruptedException { @@ -68,7 +79,9 @@ public void testActionFiltersRequest() throws ExecutionException, InterruptedExc String actionName = randomAlphaOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null, new TaskManager(Settings.EMPTY)) { + TransportAction transportAction = + new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null, + new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) { @Override protected void doExecute(TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); @@ -144,7 +157,8 @@ public void exe String actionName = randomAlphaOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null, new TaskManager(Settings.EMPTY)) { + TransportAction transportAction = new TransportAction(Settings.EMPTY, + actionName, null, actionFilters, null, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) { @Override protected void doExecute(TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); diff --git 
a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 54253e9620745..470da323043ae 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -67,6 +67,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -191,7 +192,7 @@ public void setUp() throws Exception { transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); setClusterState(clusterService, TEST_INDEX); diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index b14b030a5dc88..de65d2a3f9240 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -88,7 +88,7 @@ public void setUp() throws Exception { transport = new CapturingTransport(); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); localNode = new DiscoveryNode("local_node", buildNewFakeTransportAddress(), Collections.emptyMap(), diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 7d471f77f83d0..60a46876a7126 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -181,7 +181,7 @@ public void setUp() throws Exception { transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); int numNodes = randomIntBetween(3, 10); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 9f1591f6a540b..3aeab0fa5fb5b 100644 
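The recurring test-file change in this patch is mechanical: TransportService gained a trailing Set<String> taskHeaders constructor argument, so every fixture now passes one, usually Collections.emptySet(). A condensed sketch of the new call shape (assuming settings, transport, threadPool and a localNode from the enclosing test):

    TransportService transportService = new TransportService(settings, transport, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null,
        Collections.emptySet());   // pass real header names here to exercise task headers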
--- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -96,7 +96,7 @@ threadPool, BigArrays.NON_RECYCLING_INSTANCE, circuitBreakerService, new NamedWr new NetworkService(Collections.emptyList())); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index d2472da34f56c..9356fd12a3a5b 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -163,7 +163,7 @@ public void setUp() throws Exception { transport = new CapturingTransport(); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); @@ -977,7 +977,7 @@ public void testRetryOnReplicaWithRealTransport() throws Exception { new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), Version.CURRENT); transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> clusterService.localNode(),null); + x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); @@ -1040,7 +1040,7 @@ private void assertIndexShardCounter(int expected) { * half the time. */ private ReplicationTask maybeTask() { - return random().nextBoolean() ? new ReplicationTask(0, null, null, null, null) : null; + return random().nextBoolean() ? 
new ReplicationTask(0, null, null, null, null, null) : null; } /** diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index b3db10f920973..47ce090d895fa 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -62,6 +62,7 @@ import org.junit.BeforeClass; import org.mockito.ArgumentCaptor; +import java.util.Collections; import java.util.HashSet; import java.util.Locale; import java.util.concurrent.ExecutionException; @@ -254,7 +255,7 @@ public void testDocumentFailureInShardOperationOnReplica() throws Exception { public void testReplicaProxy() throws InterruptedException, ExecutionException { CapturingTransport transport = new CapturingTransport(); TransportService transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); ShardStateAction shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); @@ -355,7 +356,8 @@ protected TestAction() { protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentFailureOnReplica) { super(Settings.EMPTY, "test", - new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null), null, + new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()), null, null, null, null, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, TestRequest::new, ThreadPool.Names.SAME); this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; diff --git a/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 29235329d6669..8db45cc5508ef 100644 --- a/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -53,6 +53,7 @@ import org.junit.Before; import org.junit.BeforeClass; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -144,7 +145,8 @@ public void setUp() throws Exception { transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet() + ); transportService.start(); transportService.acceptIncomingRequests(); action = new TestTransportInstanceSingleOperationAction( diff --git a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java 
b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index 160c14c243cb2..bca04738d8b89 100644 --- a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -59,7 +59,7 @@ private Actions(Settings settings, ThreadPool threadPool, GenericAction[] action private static class InternalTransportAction extends TransportAction { private InternalTransportAction(Settings settings, String actionName, ThreadPool threadPool) { - super(settings, actionName, threadPool, EMPTY_FILTERS, null, new TaskManager(settings)); + super(settings, actionName, threadPool, EMPTY_FILTERS, null, new TaskManager(settings, threadPool, Collections.emptySet())); } @Override diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 7ba064046f1d6..88db9f18d5c79 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -163,7 +163,7 @@ public void sendRequest(Transport.Connection conne }, (addr) -> { assert addr == null : "boundAddress: " + addr; return DiscoveryNode.createLocal(settings, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()); - }, null); + }, null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); transportClientNodesService = diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 51908a45380f0..828b385f85fa5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -155,7 +155,8 @@ public void setUp() throws Exception { this.threadPool = new TestThreadPool(getClass().getName()); this.transport = new MockTransport(); transportService = new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()), null); + boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()), null, + Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 58c9651a82af1..b1ff626fa39b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -48,6 +48,7 @@ import org.junit.Before; import org.junit.BeforeClass; +import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -108,7 +109,7 @@ public void setUp() throws Exception { this.transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null); diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index f2537e746ad0c..0f5f4870ae1bb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -60,6 +60,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -94,7 +95,7 @@ public void setUp() throws Exception { super.setUp(); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); } diff --git a/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index 1a837b825d867..f32e93bb82dbd 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -145,7 +145,7 @@ namedWriteableRegistry, new NetworkService(Collections.emptyList()), version), (boundAddress) -> new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), boundAddress.publishAddress(), Node.NODE_ATTRIBUTES.getAsMap(settings), DiscoveryNode.getRolesFromSettings(settings), version), - null); + null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); return transportService; diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index e0593a694d0b4..44914b1958777 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -402,7 +402,8 @@ public BoundTransportAddress boundAddress() { }; closeables.push(transport); final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null); + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); closeables.push(transportService); final int limitPortCounts = randomIntBetween(1, 10); final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( @@ -447,7 +448,8 @@ public BoundTransportAddress boundAddress() { }; closeables.push(transport); final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null); + new TransportService(Settings.EMPTY, transport, threadPool, 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); closeables.push(transportService); final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( executorService, @@ -497,7 +499,8 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi closeables.push(transport); final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null); + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); closeables.push(transportService); final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( @@ -555,7 +558,8 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi closeables.push(transport); final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null); + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); closeables.push(transportService); final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 3)); try { @@ -723,7 +727,8 @@ public BoundTransportAddress boundAddress() { closeables.push(transport); final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null); + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); closeables.push(transportService); final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( executorService, @@ -772,7 +777,8 @@ private NetworkHandle startServices( final Transport transport = supplier.apply(nodeSettings, version); final MockTransportService transportService = new MockTransportService(nodeSettings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> - new DiscoveryNode(nodeId, nodeId, boundAddress.publishAddress(), emptyMap(), nodeRoles, version), null); + new DiscoveryNode(nodeId, nodeId, boundAddress.publishAddress(), emptyMap(), nodeRoles, version), null, + Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); final ConcurrentMap counters = ConcurrentCollections.newConcurrentMap(); diff --git a/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java b/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java index 2b15181ca3930..16d9df8c820ee 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java @@ -26,6 +26,7 @@ import org.mockito.ArgumentCaptor; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static java.util.Collections.emptyList; @@ -42,7 +43,7 @@ public class LeaderBulkByScrollTaskStateTests extends ESTestCase { @Before public void createTask() { slices = between(2, 50); - task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID); + task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); task.setWorkerCount(slices); taskState = 
task.getLeaderState(); } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java b/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java index 64bf52c319e68..db624798bb71c 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java @@ -28,6 +28,7 @@ import org.junit.Before; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CyclicBarrier; @@ -52,7 +53,7 @@ public class WorkerBulkByScrollTaskStateTests extends ESTestCase { @Before public void createTask() { - task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID); + task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); task.setWorker(Float.POSITIVE_INFINITY, null); workerState = task.getWorkerState(); } diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 7e8949cd15fbf..e80c2df4ea060 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -130,7 +130,8 @@ protected class ReplicationGroup implements AutoCloseable, Iterable private final AtomicInteger replicaId = new AtomicInteger(); private final AtomicInteger docId = new AtomicInteger(); boolean closed = false; - private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer(Settings.EMPTY, new TaskManager(Settings.EMPTY), + private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer(Settings.EMPTY, + new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), (request, parentTask, primaryAllocationId, primaryTerm, listener) -> { try { new ResyncAction(request, listener, ReplicationGroup.this).execute(); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 71faecfcea59a..618714fc9d959 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -58,7 +58,7 @@ public void setUp() throws Exception { transport = new CapturingTransport(); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index 1b9a0ff629066..433f662062735 100644 --- 
a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -50,7 +50,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { public void testSyncerSendsOffCorrectDocuments() throws Exception { IndexShard shard = newStartedShard(true); - TaskManager taskManager = new TaskManager(Settings.EMPTY); + TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); AtomicBoolean syncActionCalled = new AtomicBoolean(); PrimaryReplicaSyncer.SyncAction syncAction = (request, parentTask, allocationId, primaryTerm, listener) -> { @@ -112,7 +112,8 @@ public void testSyncerOnClosingShard() throws Exception { syncCalledLatch.countDown(); threadPool.generic().execute(() -> listener.onResponse(new ResyncReplicationResponse())); }; - PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(Settings.EMPTY, new TaskManager(Settings.EMPTY), syncAction); + PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(Settings.EMPTY, + new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), syncAction); syncer.setChunkSize(new ByteSizeValue(1)); // every document is sent off separately int numDocs = 10; @@ -158,7 +159,8 @@ public void testStatusSerialization() throws IOException { } public void testStatusEquals() throws IOException { - PrimaryReplicaSyncer.ResyncTask task = new PrimaryReplicaSyncer.ResyncTask(0, "type", "action", "desc", null); + PrimaryReplicaSyncer.ResyncTask task = + new PrimaryReplicaSyncer.ResyncTask(0, "type", "action", "desc", null, Collections.emptyMap()); task.setPhase(randomAlphaOfLength(10)); task.setResyncedOperations(randomIntBetween(0, 1000)); task.setTotalOperations(randomIntBetween(0, 1000)); @@ -181,7 +183,8 @@ public void testStatusEquals() throws IOException { } public void testStatusReportsCorrectNumbers() throws IOException { - PrimaryReplicaSyncer.ResyncTask task = new PrimaryReplicaSyncer.ResyncTask(0, "type", "action", "desc", null); + PrimaryReplicaSyncer.ResyncTask task = + new PrimaryReplicaSyncer.ResyncTask(0, "type", "action", "desc", null, Collections.emptyMap()); task.setPhase(randomAlphaOfLength(10)); task.setResyncedOperations(randomIntBetween(0, 1000)); task.setTotalOperations(randomIntBetween(0, 1000)); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 6e6eaf726a599..dd10dd2747df6 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -159,7 +159,8 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th // services TransportService transportService = new TransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings); + boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings, + Collections.emptySet()); MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, xContentRegistry, null, null, null) { // metaData upgrader should do nothing diff --git 
a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index bc5a5b95b958a..d76429c53f3a5 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -401,7 +401,8 @@ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNod final Settings settings = Settings.builder().put("node.name", discoveryNode.getName()).build(); final TransportService transportService = new TransportService(settings, null, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null); + boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, + Collections.emptySet()); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, transportService, null); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 92f018f282a43..5ed708ecb7581 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -213,11 +213,11 @@ public void onFailure(Exception e) { SearchPhaseResult searchPhaseResult = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), - new SearchTask(123L, "", "", "", null)); + new SearchTask(123L, "", "", "", null, Collections.emptyMap())); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); ShardFetchRequest req = new ShardFetchRequest(searchPhaseResult.getRequestId(), intCursors, null /* not a scroll */); - service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null)); + service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null, Collections.emptyMap())); } catch (AlreadyClosedException ex) { throw ex; } catch (IllegalStateException ex) { diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index d651c92cd611e..06d738cfb6016 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -62,6 +62,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.hamcrest.Matchers.anyOf; @@ -94,7 +95,7 @@ private void countTestCase(Query query, IndexReader reader, boolean shouldCollec TestSearchContext context = new TestSearchContext(null, indexShard); context.parsedQuery(new ParsedQuery(query)); context.setSize(0); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); final IndexSearcher searcher = shouldCollect ? 
new IndexSearcher(reader) : getAssertingEarlyTerminationSearcher(reader, 0); @@ -166,7 +167,7 @@ public void testPostFilterDisablesCountOptimization() throws Exception { IndexReader reader = DirectoryReader.open(dir); IndexSearcher contextSearcher = getAssertingEarlyTerminationSearcher(reader, 0); TestSearchContext context = new TestSearchContext(null, indexShard); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); @@ -195,7 +196,7 @@ public void testMinScoreDisablesCountOptimization() throws Exception { TestSearchContext context = new TestSearchContext(null, indexShard); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); context.setSize(0); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); @@ -209,7 +210,7 @@ public void testMinScoreDisablesCountOptimization() throws Exception { public void testQueryCapturesThreadPoolStats() throws Exception { TestSearchContext context = new TestSearchContext(null, indexShard); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); Directory dir = newDirectory(); @@ -251,7 +252,7 @@ public void testInOrderScrollOptimization() throws Exception { scrollContext.maxScore = Float.NaN; scrollContext.totalHits = -1; context.scrollContext(scrollContext); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); int size = randomIntBetween(2, 5); context.setSize(size); @@ -290,7 +291,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { } w.close(); TestSearchContext context = new TestSearchContext(null, indexShard); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); context.terminateAfter(1); @@ -384,7 +385,7 @@ public void testIndexSortingEarlyTermination() throws Exception { TestSearchContext context = new TestSearchContext(null, indexShard); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); context.setSize(1); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.sort(new SortAndFormats(sort, new DocValueFormat[] {DocValueFormat.RAW})); final IndexReader reader = DirectoryReader.open(dir); @@ -471,7 +472,7 @@ public void testIndexSortScrollOptimization() throws Exception { scrollContext.maxScore = Float.NaN; scrollContext.totalHits = -1; context.scrollContext(scrollContext); - context.setTask(new SearchTask(123L, "", "", "", null)); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.setSize(10); context.sort(searchSortAndFormat); diff --git a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java index 6643a71b0962f..be0624d6bba83 100644 --- 
a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; + import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; @@ -33,10 +35,11 @@ public void testEmptyToString() { public void testNonEmptyToString() { TaskInfo info = new TaskInfo( - new TaskId("node1", 1), "dummy-type", "dummy-action", "dummy-description", null, 0, 1, true, new TaskId("node1", 0)); + new TaskId("node1", 1), "dummy-type", "dummy-action", "dummy-description", null, 0, 1, true, new TaskId("node1", 0), + Collections.singletonMap("foo", "bar")); ListTasksResponse tasksResponse = new ListTasksResponse(singletonList(info), emptyList(), emptyList()); assertEquals("{\"tasks\":{\"node1:1\":{\"node\":\"node1\",\"id\":1,\"type\":\"dummy-type\",\"action\":\"dummy-action\"," + "\"description\":\"dummy-description\",\"start_time_in_millis\":0,\"running_time_in_nanos\":1,\"cancellable\":true," - + "\"parent_task_id\":\"node1:0\"}}}", tasksResponse.toString()); + + "\"parent_task_id\":\"node1:0\",\"headers\":{\"foo\":\"bar\"}}}}", tasksResponse.toString()); } } diff --git a/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java b/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java index e70c2b7119421..d4da4f8f1c5cb 100644 --- a/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java @@ -134,7 +134,9 @@ private static TaskInfo randomTaskInfo() throws IOException { long runningTimeNanos = randomLong(); boolean cancellable = randomBoolean(); TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(); - return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId); + Map headers = + randomBoolean() ? 
Collections.emptyMap() : Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); + return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers); } private static TaskId randomTaskId() { diff --git a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index c4fe88d2fce46..08d88ad2e0486 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -73,7 +73,7 @@ private NetworkHandle startServices(String nodeNameAndId, Settings settings, Ver boundAddress.publishAddress(), emptyMap(), emptySet(), - version), null); + version), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); transportServices.add(transportService); diff --git a/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java index a0752b0048564..73cff7717b44d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java @@ -27,6 +27,8 @@ import org.junit.After; import org.junit.Before; +import java.util.Collections; + public abstract class AbstractAsyncBulkByScrollActionTestCase< Request extends AbstractBulkByScrollRequest, Response extends BulkByScrollResponse> @@ -37,7 +39,7 @@ public abstract class AbstractAsyncBulkByScrollActionTestCase< @Before public void setupForTest() { threadPool = new TestThreadPool(getTestName()); - task = new BulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID); + task = new BulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); task.setWorker(Float.POSITIVE_INFINITY, null); } diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 9e2efc955e4f3..3bca113dc7c4b 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -50,6 +50,7 @@ import java.nio.file.Path; import java.util.Collection; import java.util.Collections; +import java.util.Set; import java.util.function.Consumer; import java.util.function.Function; @@ -115,15 +116,15 @@ protected SearchService newSearchService(ClusterService clusterService, IndicesS protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, Function localNodeFactory, - ClusterSettings clusterSettings) { + ClusterSettings clusterSettings, Set taskHeaders) { // we use the MockTransportService.TestPlugin class as a marker to create a network // module with this MockNetworkService. NetworkService is such an integral part of the systme // we don't allow to plug it in from plugins or anything. this is a test-only override and // can't be done in a production env. 
if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).isEmpty()) { - return super.newTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings); + return super.newTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); } else { - return new MockTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings); + return new MockTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index bccbd537a53b4..dec204537b917 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -27,8 +27,10 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collection; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; /** @@ -41,8 +43,8 @@ public class MockTaskManager extends TaskManager { private final Collection listeners = new CopyOnWriteArrayList<>(); - public MockTaskManager(Settings settings) { - super(settings); + public MockTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { + super(settings, threadPool, taskHeaders); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index bdb3e317bc22e..7f03b8c5ed31b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -111,16 +111,16 @@ public static MockTransportService createNewService(Settings settings, Version v NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); final Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - return createNewService(settings, transport, version, threadPool, clusterSettings); + return createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); } public static MockTransportService createNewService(Settings settings, Transport transport, Version version, ThreadPool threadPool, - @Nullable ClusterSettings clusterSettings) { + @Nullable ClusterSettings clusterSettings, Set taskHeaders) { return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), UUIDs.randomBase64UUID(), boundAddress.publishAddress(), Node.NODE_ATTRIBUTES.getAsMap(settings), DiscoveryNode.getRolesFromSettings(settings), version), - clusterSettings); + clusterSettings, taskHeaders); } private final Transport original; @@ -135,7 +135,7 @@ public MockTransportService(Settings settings, Transport transport, ThreadPool t @Nullable ClusterSettings clusterSettings) { this(settings, transport, threadPool, interceptor, 
(boundAddress) -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), settings.get(Node.NODE_NAME_SETTING.getKey(), - UUIDs.randomBase64UUID())), clusterSettings); + UUIDs.randomBase64UUID())), clusterSettings, Collections.emptySet()); } /** @@ -146,8 +146,9 @@ public MockTransportService(Settings settings, Transport transport, ThreadPool t */ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, Function localNodeFactory, - @Nullable ClusterSettings clusterSettings) { - super(settings, new LookupTestTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings); + @Nullable ClusterSettings clusterSettings, Set taskHeaders) { + super(settings, new LookupTestTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, + taskHeaders); this.original = transport; } @@ -160,11 +161,11 @@ public static TransportAddress[] extractTransportAddresses(TransportService tran } @Override - protected TaskManager createTaskManager() { + protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { - return new MockTaskManager(settings); + return new MockTaskManager(settings, threadPool, taskHeaders); } else { - return super.createTaskManager(); + return super.createTaskManager(settings, threadPool, taskHeaders); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index b13cfd3f38f2a..c0c171e9bca70 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1931,7 +1931,8 @@ public void testHandshakeWithIncompatVersion() { Version version = Version.fromString("2.0.0"); try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null)) { + MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, + Collections.emptySet())) { service.start(); service.acceptIncomingRequests(); DiscoveryNode node = @@ -1953,7 +1954,8 @@ public void testHandshakeUpdatesVersion() throws IOException { Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null)) { + MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, + Collections.emptySet())) { service.start(); service.acceptIncomingRequests(); DiscoveryNode node = @@ -1989,7 +1991,7 @@ protected String handleRequest(TcpChannel mockChannel, String profileName, Strea }; try (MockTransportService service = 
MockTransportService.createNewService(Settings.EMPTY, transport, Version.CURRENT, threadPool, - null)) { + null, Collections.emptySet())) { service.start(); service.acceptIncomingRequests(); // this acts like a node that doesn't have support for handshakes diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index 916e97ffd1211..e9f5f86462f54 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -50,7 +50,7 @@ protected Version executeHandshake(DiscoveryNode node, TcpChannel mockChannel, T } }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings); + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 1f17c3df54118..a4786b4f3d8e6 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -83,7 +83,7 @@ protected SocketEventHandler getSocketEventHandler(Logger logger) { } }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings); + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; } From 75449fc37fcbd8502c2b8b849e562ec89f5f4500 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 15 Jan 2018 08:44:49 +0000 Subject: [PATCH 09/31] Adds metadata to rewritten aggregations (#28185) * Adds metadata to rewritten aggregations Prior to this change, if any filters in the filters aggregation were rewritten, the rewritten version of the FiltersAggregationBuilder would not contain the metadata from the original. This is because `AbstractAggregationBuilder.getMetadata()` returns an empty map when no metadata is set.
Closes #28170 * Always set metadata when rewritten --- .../search/aggregations/AggregationBuilder.java | 4 +--- .../search/aggregations/FiltersAggsRewriteIT.java | 6 ++++++ .../search/aggregations/bucket/FiltersTests.java | 2 ++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 99bf9be683ee3..80d8277f4cab2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -101,9 +101,7 @@ public final AggregationBuilder rewrite(QueryRewriteContext context) throws IOEx if (rewritten == this) { return rewritten; } - if (getMetaData() != null && rewritten.getMetaData() == null) { - rewritten.setMetaData(getMetaData()); - } + rewritten.setMetaData(getMetaData()); AggregatorFactories.Builder rewrittenSubAggs = factoriesBuilder.rewrite(context); rewritten.subAggregations(rewrittenSubAggs); return rewritten; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java index bb4c3a2a5eb0f..ce5e4a694f279 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java @@ -32,6 +32,8 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; public class FiltersAggsRewriteIT extends ESSingleNodeTestCase { @@ -58,10 +60,14 @@ public void testWrapperQueryIsRewritten() throws IOException { } FiltersAggregationBuilder builder = new FiltersAggregationBuilder("titles", new FiltersAggregator.KeyedFilter("titleterms", new WrapperQueryBuilder(bytesReference))); + Map metadata = new HashMap<>(); + metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + builder.setMetaData(metadata); SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get(); assertEquals(3, searchResponse.getHits().getTotalHits()); InternalFilters filters = searchResponse.getAggregations().get("titles"); assertEquals(1, filters.getBuckets().size()); assertEquals(2, filters.getBuckets().get(0).getDocCount()); + assertEquals(metadata, filters.getMetaData()); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 7e63bbb6f3855..e0cd490134f14 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; import java.io.IOException; +import java.util.Collections; import static org.hamcrest.Matchers.instanceOf; @@ -123,6 +124,7 @@ public void testOtherBucket() throws IOException { public void testRewrite() throws IOException { // test non-keyed filter that doesn't rewrite AggregationBuilder original = new FiltersAggregationBuilder("my-agg", new MatchAllQueryBuilder()); + original.setMetaData(Collections.singletonMap(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20))); 
AggregationBuilder rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertSame(original, rewritten); From b43dd6d1f7975ee929a5a86ba2b72cdbe9ec9f7b Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Mon, 15 Jan 2018 07:28:08 -0500 Subject: [PATCH 10/31] Limit the analyzed text for highlighting (#28176) - Introduce the index level setting "index.highlight.max_analyzed_offset" to control the max number of characters to be analyzed for highlighting - Make this setting unset by default (equal to -1) - Issue a deprecation warning if the setting is unset and analysis is required on a text larger than the ES v.7.x max setting (10000) - Throw IllegalArgumentException if the setting is set by a user, and analysis is required on a text larger than the user's set value. Closes #27517 --- docs/reference/index-modules.asciidoc | 2 +- .../migration/migrate_6_0/analysis.asciidoc | 8 ++++--- .../search/request/highlighting.asciidoc | 6 +++--- .../30_max_analyzed_offset.yml | 16 ++++++-------- .../uhighlight/CustomUnifiedHighlighter.java | 16 +++++++++++--- .../elasticsearch/index/IndexSettings.java | 14 +++++++++---- .../subphase/highlight/PlainHighlighter.java | 21 +++++++++++++------ .../CustomUnifiedHighlighterTests.java | 2 +- 8 files changed, 54 insertions(+), 31 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index c5825c9fa15ca..f3ae767d78db7 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -186,7 +186,7 @@ specific index module: The maximum number of characters that will be analyzed for a highlight request. This setting is only applicable when highlighting is requested on a text that was indexed without offsets or term vectors. - Defaults to `10000`. + By default this settings is unset in 6.x, defaults to `-1`. `index.max_terms_count`:: diff --git a/docs/reference/migration/migrate_6_0/analysis.asciidoc b/docs/reference/migration/migrate_6_0/analysis.asciidoc index 6a5981f27edad..fba71827bedd1 100644 --- a/docs/reference/migration/migrate_6_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_6_0/analysis.asciidoc @@ -16,6 +16,8 @@ created in 5.x. Highlighting a text that was indexed without offsets or term vectors, requires analysis of this text in memory real time during the search request. For large texts this analysis may take substantial amount of time and memory. -To protect against this, the maximum number of characters that will be analyzed has been -limited to 10000. This default limit can be changed -for a particular index with the index setting `index.highlight.max_analyzed_offset`. +To protect against this, the maximum number of characters that to be analyzed will be +limited to 10000 in the next major Elastic version. For this version, by default the limit +is not set. A deprecation warning will be issued when an analyzed text exceeds 10000. + The limit can be set for a particular index with the index setting +`index.highlight.max_analyzed_offset`. \ No newline at end of file diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 7b154522d7a97..4748a1c171a79 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -106,9 +106,9 @@ needs highlighting. The `plain` highlighter always uses plain highlighting. [WARNING] Plain highlighting for large texts may require substantial amount of time and memory.
-To protect against this, the maximum number of text characters that will be analyzed has been -limited to 10000. This default limit can be changed -for a particular index with the index setting `index.highlight.max_analyzed_offset`. +To protect against this, the maximum number of text characters to be analyzed will be +limited to 10000 in the next major Elastic version. The default limit is not set for this version, +but can be set for a particular index with the index setting `index.highlight.max_analyzed_offset`. [[highlighting-settings]] ==== Highlighting Settings diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index 1e14bbd18a930..bb7c687138e08 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -33,27 +33,24 @@ setup: - skip: version: " - 6.1.99" reason: index.highlight.max_analyzed_offset setting has been added in 6.2 - features: "warnings" - do: + catch: bad_request search: index: test1 body: {"query" : {"match" : {"field1" : "fox"}}, "highlight" : {"type" : "unified", "fields" : {"field1" : {}}}} - warnings: - - Deprecated large text to be analyzed for highlighting! The length has exceeded the allowed maximum of [10]. This maximum can be set by changing the [index.highlight.max_analyzed_offset] index level setting. For large texts, indexing with offsets or term vectors is recommended! - + - match: { error.root_cause.0.type: "illegal_argument_exception" } --- "Plain highlighter on a field WITHOUT OFFSETS exceeding index.highlight.max_analyzed_offset should FAIL": - skip: version: " - 6.1.99" reason: index.highlight.max_analyzed_offset setting has been added in 6.2 - features: "warnings" - do: + catch: bad_request search: index: test1 body: {"query" : {"match" : {"field1" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field1" : {}}}} - warnings: - - Deprecated large text to be analyzed for highlighting! The length has exceeded the allowed maximum of [10]. This maximum can be set by changing the [index.highlight.max_analyzed_offset] index level setting. For large texts, indexing with offsets or term vectors, and highlighting with unified or fvh highlighter is recommended! + - match: { error.root_cause.0.type: "illegal_argument_exception" } --- "Unified highlighter on a field WITH OFFSETS exceeding index.highlight.max_analyzed_offset should SUCCEED": @@ -72,10 +69,9 @@ setup: - skip: version: " - 6.1.99" reason: index.highlight.max_analyzed_offset setting has been added in 6.2 - features: "warnings" - do: + catch: bad_request search: index: test1 body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}}} - warnings: - - Deprecated large text to be analyzed for highlighting! The length has exceeded the allowed maximum of [10]. This maximum can be set by changing the [index.highlight.max_analyzed_offset] index level setting. For large texts, indexing with offsets or term vectors, and highlighting with unified or fvh highlighter is recommended! 
+ - match: { error.root_cause.0.type: "illegal_argument_exception" } diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java index f72ee9eff034e..dc32e42c57bd3 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -128,11 +128,21 @@ public Snippet[] highlightField(String field, Query query, int docId, int maxPas @Override protected List loadFieldValues(String[] fields, DocIdSetIterator docIter, int cacheCharsThreshold) throws IOException { - if ((offsetSource == OffsetSource.ANALYSIS) && (fieldValue.length() > maxAnalyzedOffset)) { + // Issue deprecation warning if maxAnalyzedOffset is not set, and field length > default setting for 7.0 + final int defaultMaxAnalyzedOffset7 = 10000; + if ((offsetSource == OffsetSource.ANALYSIS) && (maxAnalyzedOffset == -1) && (fieldValue.length() > defaultMaxAnalyzedOffset7)) { DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(CustomUnifiedHighlighter.class)); deprecationLogger.deprecated( - "Deprecated large text to be analyzed for highlighting! The length has exceeded the allowed maximum of [" + - maxAnalyzedOffset + "]. " + "This maximum can be set by changing the [" + + "The length of text to be analyzed for highlighting [" + fieldValue.length() + + "] exceeded the allowed maximum of [" + defaultMaxAnalyzedOffset7 + "] set for the next major Elastic version. " + + "For large texts, indexing with offsets or term vectors is recommended!"); + } + // Throw an error if maxAnalyzedOffset is explicitly set by the user, and field length > maxAnalyzedOffset + if ((offsetSource == OffsetSource.ANALYSIS) && (maxAnalyzedOffset > 0) && (fieldValue.length() > maxAnalyzedOffset)) { + // maxAnalyzedOffset is not set by user + throw new IllegalArgumentException( + "The length of text to be analyzed for highlighting [" + fieldValue.length() + + "] exceeded the allowed maximum of [" + maxAnalyzedOffset + "]. This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() + "] index level setting. " + "For large texts, indexing with offsets or term vectors is recommended!"); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 42ffe6a0edd7c..c8de2400aeb88 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -126,11 +126,11 @@ public final class IndexSettings { * A setting describing the maximum number of characters that will be analyzed for a highlight request. * This setting is only applicable when highlighting is requested on a text that was indexed without * offsets or term vectors. - * The default maximum of 10000 characters is defensive as for highlighting larger texts, - * indexing with offsets or term vectors is recommended. + * This setting is defensive as for highlighting larger texts, indexing with offsets or term vectors is recommended. + * For 6.x the default value is not set or equals to -1. 
*/ public static final Setting<Integer> MAX_ANALYZED_OFFSET_SETTING = - Setting.intSetting("index.highlight.max_analyzed_offset", 10000, 1, Property.Dynamic, Property.IndexScope); + Setting.intSetting("index.highlight.max_analyzed_offset", -1, -1, Property.Dynamic, Property.IndexScope); /** @@ -727,7 +727,13 @@ private void setMaxDocvalueFields(int maxDocvalueFields) { */ public int getHighlightMaxAnalyzedOffset() { return this.maxAnalyzedOffset; } - private void setHighlightMaxAnalyzedOffset(int maxAnalyzedOffset) { this.maxAnalyzedOffset = maxAnalyzedOffset; } + private void setHighlightMaxAnalyzedOffset(int maxAnalyzedOffset) { + if (maxAnalyzedOffset != -1 && maxAnalyzedOffset < 1) { + throw new IllegalArgumentException( + "[" + MAX_ANALYZED_OFFSET_SETTING.getKey() + "] must be >= 1 or -1 (unset)"); + } + this.maxAnalyzedOffset = maxAnalyzedOffset; + } /** * Returns the maximum number of terms that can be used in a Terms Query request diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index a69ee2473c773..5c3aa876e5a42 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -110,17 +110,26 @@ public HighlightField highlight(HighlighterContext highlighterContext) { try { textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext); - + final int defaultMaxAnalyzedOffset7 = 10000; for (Object textToHighlight : textsToHighlight) { String text = convertFieldValue(mapper.fieldType(), textToHighlight); - if (text.length() > maxAnalyzedOffset) { + + // Issue deprecation warning if maxAnalyzedOffset is not set, and text length > default setting for 7.0 + if ((maxAnalyzedOffset == -1) && (text.length() > defaultMaxAnalyzedOffset7)) { DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(PlainHighlighter.class)); deprecationLogger.deprecated( - "Deprecated large text to be analyzed for highlighting! The length has exceeded the allowed maximum of [" + - maxAnalyzedOffset + "]. " + "This maximum can be set by changing the [" + + "The length of text to be analyzed for highlighting [" + text.length() + "] exceeded the allowed maximum of [" + + defaultMaxAnalyzedOffset7 + "] set for the next major Elasticsearch version. " + + "For large texts, indexing with offsets or term vectors is recommended!"); + } + // Throw an error if maxAnalyzedOffset is explicitly set by the user, and text length > maxAnalyzedOffset + if ((maxAnalyzedOffset > 0) && (text.length() > maxAnalyzedOffset)) { + // maxAnalyzedOffset was explicitly set by the user, so enforce it + throw new IllegalArgumentException( + "The length of text to be analyzed for highlighting [" + text.length() + + "] exceeded the allowed maximum of [" + maxAnalyzedOffset + "]. This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() + "] index level setting.
" + - "For large texts, indexing with offsets or term vectors, and highlighting with unified or " + - "fvh highlighter is recommended!"); + "For large texts, indexing with offsets or term vectors is recommended!"); } try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().name(), text)) { diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 5253b354752a0..9353600bd0422 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -79,7 +79,7 @@ private void assertHighlightOneDoc(String fieldName, String[] inputs, Analyzer a String rawValue = Strings.arrayToDelimitedString(inputs, String.valueOf(MULTIVAL_SEP_CHAR)); CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer, null, new CustomPassageFormatter("", "", new DefaultEncoder()), locale, - breakIterator, rawValue, noMatchSize, 10000); + breakIterator, rawValue, noMatchSize, -1); highlighter.setFieldMatcher((name) -> "text".equals(name)); final Snippet[] snippets = highlighter.highlightField("text", query, topDocs.scoreDocs[0].doc, expectedPassages.length); From a600daca7e3bd97cfafe11c216f27ac48b8c3e73 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 8 Jan 2018 15:03:40 +0100 Subject: [PATCH 11/31] Avoid concurrent snapshot finalizations when deleting an INIT snapshot (#28078) This commit removes the finalization of a snapshot by the snapshot deletion request. This way, the deletion marks the snapshot as ABORTED in cluster state and waits for the snapshot completion. It is the responsability of the snapshot execution to detect the abortion and terminates itself correctly. This avoids concurrent snapshot finalizations and also ordinates the operations: the deletion aborts the snapshot and waits for the snapshot completion, the creation detects the abortion and stops by itself and finalizes the snapshot, then the deletion resumes and continues the deletion process. 
--- .../snapshots/SnapshotsService.java | 73 ++++++++++++------- .../SharedClusterSnapshotRestoreIT.java | 6 +- 2 files changed, 48 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index dc87c3dd4e113..2a48d6a1b71bc 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -372,8 +372,8 @@ private void beginSnapshot(final ClusterState clusterState, return; } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - boolean accepted = false; - SnapshotsInProgress.Entry updatedSnapshot; + + SnapshotsInProgress.Entry endSnapshot; String failure = null; @Override @@ -381,17 +381,23 @@ public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); List<SnapshotsInProgress.Entry> entries = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - if (entry.snapshot().equals(snapshot.snapshot()) && entry.state() != State.ABORTED) { - // Replace the snapshot that was just created + if (entry.snapshot().equals(snapshot.snapshot()) == false) { + entries.add(entry); + continue; + } + + if (entry.state() != State.ABORTED) { + // Replace the snapshot that was just initialized ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards = shards(currentState, entry.indices()); if (!partial) { Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData()); Set<String> missing = indicesWithMissingShards.v1(); Set<String> closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { - StringBuilder failureMessage = new StringBuilder(); - updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); - entries.add(updatedSnapshot); + endSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); + entries.add(endSnapshot); + + final StringBuilder failureMessage = new StringBuilder(); if (missing.isEmpty() == false) { failureMessage.append("Indices don't have primary shards "); failureMessage.append(missing); @@ -407,13 +413,16 @@ public ClusterState execute(ClusterState currentState) { continue; } } - updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); + SnapshotsInProgress.Entry updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); entries.add(updatedSnapshot); - if (!completed(shards.values())) { - accepted = true; + if (completed(shards.values())) { + endSnapshot = updatedSnapshot; } } else { - entries.add(entry); + assert entry.state() == State.ABORTED : "expecting snapshot to be aborted during initialization"; + failure = "snapshot was aborted during initialization"; + endSnapshot = entry; + entries.add(endSnapshot); } } return ClusterState.builder(currentState) @@ -448,8 +457,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // We should end snapshot only if 1) we didn't accept it for processing (which happens when there // is nothing to do) and 2) there was a snapshot in metadata that we should end. Otherwise we should // go ahead and continue working on this snapshot rather than end here.
- if (!accepted && updatedSnapshot != null) { - endSnapshot(updatedSnapshot, failure); + if (endSnapshot != null) { + endSnapshot(endSnapshot, failure); } } }); @@ -750,6 +759,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { } entries.add(updatedSnapshot); } else if (snapshot.state() == State.INIT && newMaster) { + changed = true; + // Mark the snapshot as aborted as it failed to start from the previous master + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); + entries.add(updatedSnapshot); + // Clean up the snapshot that failed to start from the old master deleteSnapshot(snapshot.snapshot(), new DeleteSnapshotListener() { @Override @@ -935,7 +949,7 @@ private Tuple, Set> indicesWithMissingShards(ImmutableOpenMa * * @param entry snapshot */ - void endSnapshot(SnapshotsInProgress.Entry entry) { + void endSnapshot(final SnapshotsInProgress.Entry entry) { endSnapshot(entry, null); } @@ -1144,24 +1158,26 @@ public ClusterState execute(ClusterState currentState) throws Exception { } else { // This snapshot is currently running - stopping shards first waitForSnapshot = true; - ImmutableOpenMap shards; - if (snapshotEntry.state() == State.STARTED && snapshotEntry.shards() != null) { - // snapshot is currently running - stop started shards - ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); + + final ImmutableOpenMap shards; + + final State state = snapshotEntry.state(); + if (state == State.INIT) { + // snapshot is still initializing, mark it as aborted + shards = snapshotEntry.shards(); + + } else if (state == State.STARTED) { + // snapshot is started - mark every non completed shard as aborted + final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) { ShardSnapshotStatus status = shardEntry.value; - if (!status.state().completed()) { - shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED, - "aborted by snapshot deletion")); - } else { - shardsBuilder.put(shardEntry.key, status); + if (status.state().completed() == false) { + status = new ShardSnapshotStatus(status.nodeId(), State.ABORTED, "aborted by snapshot deletion"); } + shardsBuilder.put(shardEntry.key, status); } shards = shardsBuilder.build(); - } else if (snapshotEntry.state() == State.INIT) { - // snapshot hasn't started yet - end it - shards = snapshotEntry.shards(); - endSnapshot(snapshotEntry); + } else { boolean hasUncompletedShards = false; // Cleanup in case a node gone missing and snapshot wasn't updated for some reason @@ -1178,7 +1194,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { logger.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes"); return currentState; } else { - // no shards to wait for - finish the snapshot + // no shards to wait for but a node is gone - this is the only case + // where we force to finish the snapshot logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); shards = snapshotEntry.shards(); endSnapshot(snapshotEntry); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3ac51992b7de8..b8448890b66fc 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ 
b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3151,7 +3151,7 @@ public void testSnapshottingWithMissingSequenceNumbers() { assertThat(shardStats.getSeqNoStats().getMaxSeqNo(), equalTo(15L)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/27974") + @TestLogging("org.elasticsearch.snapshots:TRACE") public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { final Client client = client(); @@ -3163,11 +3163,11 @@ public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { )); createIndex("test-idx"); - final int nbDocs = scaledRandomIntBetween(1, 100); + final int nbDocs = scaledRandomIntBetween(100, 500); for (int i = 0; i < nbDocs; i++) { index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i); } - refresh(); + flushAndRefresh("test-idx"); assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo((long) nbDocs)); // Create a snapshot From ea6a46a99cff331079e535b2b7cc34d3c99f66d1 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 15 Jan 2018 13:45:57 +0100 Subject: [PATCH 12/31] Consistent updates of IndexShardSnapshotStatus (#28130) This commit changes IndexShardSnapshotStatus so that the Stage is updated coherently with any required information. It also provides an asCopy() method that returns the status of an IndexShardSnapshotStatus at a given point in time, ensuring that all information is coherent. Closes #26480 --- .../status/SnapshotIndexShardStatus.java | 15 +- .../snapshots/status/SnapshotStats.java | 21 +- .../status/TransportNodesSnapshotsStatus.java | 16 +- .../TransportSnapshotsStatusAction.java | 3 +- .../snapshots/IndexShardSnapshotStatus.java | 323 +++++++++--------- .../repositories/Repository.java | 2 +- .../blobstore/BlobStoreRepository.java | 125 ++++--- .../snapshots/SnapshotShardsService.java | 63 ++-- .../snapshots/SnapshotsService.java | 9 +- .../snapshots/SnapshotShardsServiceIT.java | 2 +- .../index/shard/IndexShardTestCase.java | 10 +- 11 files changed, 287 insertions(+), 302 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 324cb3712adf1..1b7ead5b96510 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.shard.ShardId; @@ -49,13 +48,13 @@ private SnapshotIndexShardStatus() { this.stats = new SnapshotStats(); } - SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus indexShardStatus) { + SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus.Copy indexShardStatus) { this(shardId, indexShardStatus, null); } - SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus indexShardStatus, String nodeId) { + SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus.Copy indexShardStatus, String
nodeId) { super(shardId); - switch (indexShardStatus.stage()) { + switch (indexShardStatus.getStage()) { case INIT: stage = SnapshotIndexShardStage.INIT; break; @@ -72,10 +71,12 @@ private SnapshotIndexShardStatus() { stage = SnapshotIndexShardStage.FAILURE; break; default: - throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.stage()); + throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage()); } - stats = new SnapshotStats(indexShardStatus); - failure = indexShardStatus.failure(); + this.stats = new SnapshotStats(indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(), + indexShardStatus.getNumberOfFiles(), indexShardStatus.getProcessedFiles(), + indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize()); + this.failure = indexShardStatus.getFailure(); this.nodeId = nodeId; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java index ba11e51d56f87..5b2bdd7c614c6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -25,33 +25,28 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import java.io.IOException; public class SnapshotStats implements Streamable, ToXContentFragment { - private long startTime; + private long startTime; private long time; - private int numberOfFiles; - private int processedFiles; - private long totalSize; - private long processedSize; SnapshotStats() { } - SnapshotStats(IndexShardSnapshotStatus indexShardStatus) { - startTime = indexShardStatus.startTime(); - time = indexShardStatus.time(); - numberOfFiles = indexShardStatus.numberOfFiles(); - processedFiles = indexShardStatus.processedFiles(); - totalSize = indexShardStatus.totalSize(); - processedSize = indexShardStatus.processedSize(); + SnapshotStats(long startTime, long time, int numberOfFiles, int processedFiles, long totalSize, long processedSize) { + this.startTime = startTime; + this.time = time; + this.numberOfFiles = numberOfFiles; + this.processedFiles = processedFiles; + this.totalSize = totalSize; + this.processedSize = processedSize; } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 872793f6ef21a..77578546b9585 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -96,7 +96,7 @@ protected NodesSnapshotStatus newResponse(Request request, List> snapshotMapBuilder = new HashMap<>(); try { - String nodeId = clusterService.localNode().getId(); + final String nodeId = clusterService.localNode().getId(); for (Snapshot snapshot : request.snapshots) { Map shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot); if (shardsStatus == null) { @@ -104,15 +104,17 @@ protected NodeSnapshotStatus nodeOperation(NodeRequest request) { } Map 
shardMapBuilder = new HashMap<>(); for (Map.Entry shardEntry : shardsStatus.entrySet()) { - SnapshotIndexShardStatus shardStatus; - IndexShardSnapshotStatus.Stage stage = shardEntry.getValue().stage(); + final ShardId shardId = shardEntry.getKey(); + + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardEntry.getValue().asCopy(); + final IndexShardSnapshotStatus.Stage stage = lastSnapshotStatus.getStage(); + + String shardNodeId = null; if (stage != IndexShardSnapshotStatus.Stage.DONE && stage != IndexShardSnapshotStatus.Stage.FAILURE) { // Store node id for the snapshots that are currently running. - shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), shardEntry.getValue(), nodeId); - } else { - shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), shardEntry.getValue()); + shardNodeId = nodeId; } - shardMapBuilder.put(shardEntry.getKey(), shardStatus); + shardMapBuilder.put(shardEntry.getKey(), new SnapshotIndexShardStatus(shardId, lastSnapshotStatus, shardNodeId)); } snapshotMapBuilder.put(snapshot, unmodifiableMap(shardMapBuilder)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 79e38be803c10..63b426ab324c4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -233,7 +233,8 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li Map shardStatues = snapshotsService.snapshotShards(request.repository(), snapshotInfo); for (Map.Entry shardStatus : shardStatues.entrySet()) { - shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue())); + IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); + shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); } final SnapshotsInProgress.State state; switch (snapshotInfo.state()) { diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 644caa7520be5..f1c247a41bb6d 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.snapshots; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + /** * Represent shard snapshot status */ @@ -47,119 +50,85 @@ public enum Stage { /** * Snapshot failed */ - FAILURE + FAILURE, + /** + * Snapshot aborted + */ + ABORTED } - private Stage stage = Stage.INIT; - + private final AtomicReference stage; private long startTime; - - private long time; - + private long totalTime; private int numberOfFiles; - - private volatile int processedFiles; - + private int processedFiles; private long totalSize; - - private volatile long processedSize; - + private long processedSize; private long indexVersion; - - private volatile boolean aborted; - private String failure; - /** - * Returns current snapshot stage - * - * @return current snapshot stage - */ - public Stage stage() { - return this.stage; - } - - /** - * Sets new snapshot stage - * - * 
@param stage new snapshot stage - */ - public void updateStage(Stage stage) { - this.stage = stage; - } - - /** - * Returns snapshot start time - * - * @return snapshot start time - */ - public long startTime() { - return this.startTime; - } - - /** - * Sets snapshot start time - * - * @param startTime snapshot start time - */ - public void startTime(long startTime) { + private IndexShardSnapshotStatus(final Stage stage, final long startTime, final long totalTime, + final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize, + final long indexVersion, final String failure) { + this.stage = new AtomicReference<>(Objects.requireNonNull(stage)); this.startTime = startTime; + this.totalTime = totalTime; + this.numberOfFiles = numberOfFiles; + this.processedFiles = processedFiles; + this.totalSize = totalSize; + this.processedSize = processedSize; + this.indexVersion = indexVersion; + this.failure = failure; } - /** - * Returns snapshot processing time - * - * @return processing time - */ - public long time() { - return this.time; + public synchronized Copy moveToStarted(final long startTime, final int numberOfFiles, final long totalSize) { + if (stage.compareAndSet(Stage.INIT, Stage.STARTED)) { + this.startTime = startTime; + this.numberOfFiles = numberOfFiles; + this.totalSize = totalSize; + } else { + throw new IllegalStateException("Unable to move the shard snapshot status to [STARTED]: " + + "expecting [INIT] but got [" + stage.get() + "]"); + } + return asCopy(); } - /** - * Sets snapshot processing time - * - * @param time snapshot processing time - */ - public void time(long time) { - this.time = time; + public synchronized Copy moveToFinalize(final long indexVersion) { + if (stage.compareAndSet(Stage.STARTED, Stage.FINALIZE)) { + this.indexVersion = indexVersion; + } else { + throw new IllegalStateException("Unable to move the shard snapshot status to [FINALIZE]: " + + "expecting [STARTED] but got [" + stage.get() + "]"); + } + return asCopy(); } - /** - * Returns true if snapshot process was aborted - * - * @return true if snapshot process was aborted - */ - public boolean aborted() { - return this.aborted; + public synchronized Copy moveToDone(final long endTime) { + if (stage.compareAndSet(Stage.FINALIZE, Stage.DONE)) { + this.totalTime = Math.max(0L, endTime - startTime); + } else { + throw new IllegalStateException("Unable to move the shard snapshot status to [DONE]: " + + "expecting [FINALIZE] but got [" + stage.get() + "]"); + } + return asCopy(); } - /** - * Marks snapshot as aborted - */ - public void abort() { - this.aborted = true; + public synchronized Copy abortIfNotCompleted(final String failure) { + if (stage.compareAndSet(Stage.INIT, Stage.ABORTED) || stage.compareAndSet(Stage.STARTED, Stage.ABORTED)) { + this.failure = failure; + } + return asCopy(); } - /** - * Sets files stats - * - * @param numberOfFiles number of files in this snapshot - * @param totalSize total size of files in this snapshot - */ - public void files(int numberOfFiles, long totalSize) { - this.numberOfFiles = numberOfFiles; - this.totalSize = totalSize; + public synchronized void moveToFailed(final long endTime, final String failure) { + if (stage.getAndSet(Stage.FAILURE) != Stage.FAILURE) { + this.totalTime = Math.max(0L, endTime - startTime); + this.failure = failure; + } } - /** - * Sets processed files stats - * - * @param numberOfFiles number of files in this snapshot - * @param totalSize total size of files in this snapshot - */ - public synchronized 
void processedFiles(int numberOfFiles, long totalSize) { - processedFiles = numberOfFiles; - processedSize = totalSize; + public boolean isAborted() { + return stage.get() == Stage.ABORTED; } /** @@ -171,71 +140,111 @@ public synchronized void addProcessedFile(long size) { } /** - * Number of files - * - * @return number of files - */ - public int numberOfFiles() { - return numberOfFiles; - } - - /** - * Total snapshot size - * - * @return snapshot size - */ - public long totalSize() { - return totalSize; - } - - /** - * Number of processed files - * - * @return number of processed files - */ - public int processedFiles() { - return processedFiles; - } - - /** - * Size of processed files + * Returns a copy of the current {@link IndexShardSnapshotStatus}. This method is + * intended to be used when a coherent state of {@link IndexShardSnapshotStatus} is needed. * - * @return size of processed files - */ - public long processedSize() { - return processedSize; - } - - - /** - * Sets index version - * - * @param indexVersion index version - */ - public void indexVersion(long indexVersion) { - this.indexVersion = indexVersion; - } - - /** - * Returns index version - * - * @return index version - */ - public long indexVersion() { - return indexVersion; - } - - /** - * Sets the reason for the failure if the snapshot is in the {@link IndexShardSnapshotStatus.Stage#FAILURE} state - */ - public void failure(String failure) { - this.failure = failure; - } - - /** - * Returns the reason for the failure if the snapshot is in the {@link IndexShardSnapshotStatus.Stage#FAILURE} state - */ - public String failure() { - return failure; + * @return a {@link IndexShardSnapshotStatus.Copy} + */ + public synchronized IndexShardSnapshotStatus.Copy asCopy() { + return new IndexShardSnapshotStatus.Copy(stage.get(), startTime, totalTime, numberOfFiles, processedFiles, totalSize, processedSize, + indexVersion, failure); + } + + public static IndexShardSnapshotStatus newInitializing() { + return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, null); + } + + public static IndexShardSnapshotStatus newFailed(final String failure) { + assert failure != null : "expecting non null failure for a failed IndexShardSnapshotStatus"; + if (failure == null) { + throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus"); + } + return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, failure); + } + + public static IndexShardSnapshotStatus newDone(final long startTime, final long totalTime, final int files, final long size) { + // The snapshot is done which means the number of processed files is the same as total + return new IndexShardSnapshotStatus(Stage.DONE, startTime, totalTime, files, files, size, size, 0, null); + } + + /** + * Returns an immutable state of {@link IndexShardSnapshotStatus} at a given point in time. 
+ */ + public static class Copy { + + private final Stage stage; + private final long startTime; + private final long totalTime; + private final int numberOfFiles; + private final int processedFiles; + private final long totalSize; + private final long processedSize; + private final long indexVersion; + private final String failure; + + public Copy(final Stage stage, final long startTime, final long totalTime, + final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize, + final long indexVersion, final String failure) { + this.stage = stage; + this.startTime = startTime; + this.totalTime = totalTime; + this.numberOfFiles = numberOfFiles; + this.processedFiles = processedFiles; + this.totalSize = totalSize; + this.processedSize = processedSize; + this.indexVersion = indexVersion; + this.failure = failure; + } + + public Stage getStage() { + return stage; + } + + public long getStartTime() { + return startTime; + } + + public long getTotalTime() { + return totalTime; + } + + public int getNumberOfFiles() { + return numberOfFiles; + } + + public int getProcessedFiles() { + return processedFiles; + } + + public long getTotalSize() { + return totalSize; + } + + public long getProcessedSize() { + return processedSize; + } + + public long getIndexVersion() { + return indexVersion; + } + + public String getFailure() { + return failure; + } + + @Override + public String toString() { + return "index shard snapshot status (" + + "stage=" + stage + + ", startTime=" + startTime + + ", totalTime=" + totalTime + + ", numberOfFiles=" + numberOfFiles + + ", processedFiles=" + processedFiles + + ", totalSize=" + totalSize + + ", processedSize=" + processedSize + + ", indexVersion=" + indexVersion + + ", failure='" + failure + '\'' + + ')'; + } } } diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index f711a72b67757..4c3d58e67ff72 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -180,7 +180,7 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. *

* As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check - * {@link IndexShardSnapshotStatus#aborted()} to see if the snapshot process should be aborted. + * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. * * @param shard shard to be snapshotted * @param snapshotId snapshot id diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 9afbb52878207..d1da5ec49cfd9 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -805,17 +805,11 @@ private void writeAtomic(final String blobName, final BytesReference bytesRef) t @Override public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { - SnapshotContext snapshotContext = new SnapshotContext(shard, snapshotId, indexId, snapshotStatus); - snapshotStatus.startTime(System.currentTimeMillis()); - + SnapshotContext snapshotContext = new SnapshotContext(shard, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); try { snapshotContext.snapshot(snapshotIndexCommit); - snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime()); - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE); } catch (Exception e) { - snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime()); - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); - snapshotStatus.failure(ExceptionsHelper.detailedMessage(e)); + snapshotStatus.moveToFailed(System.currentTimeMillis(), ExceptionsHelper.detailedMessage(e)); if (e instanceof IndexShardSnapshotFailedException) { throw (IndexShardSnapshotFailedException) e; } else { @@ -838,14 +832,7 @@ public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version versio public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { Context context = new Context(snapshotId, version, indexId, shardId); BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); - IndexShardSnapshotStatus status = new IndexShardSnapshotStatus(); - status.updateStage(IndexShardSnapshotStatus.Stage.DONE); - status.startTime(snapshot.startTime()); - status.files(snapshot.numberOfFiles(), snapshot.totalSize()); - // The snapshot is done which means the number of processed files is the same as total - status.processedFiles(snapshot.numberOfFiles(), snapshot.totalSize()); - status.time(snapshot.time()); - return status; + return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(), snapshot.numberOfFiles(), snapshot.totalSize()); } @Override @@ -1103,8 +1090,8 @@ protected Tuple buildBlobStoreIndexShardS private class SnapshotContext extends Context { private final Store store; - private final IndexShardSnapshotStatus snapshotStatus; + private final long startTime; /** * Constructs new context @@ -1114,10 +1101,11 @@ private class SnapshotContext extends Context { * @param indexId the id of the index being snapshotted * @param snapshotStatus snapshot status to report progress */ - SnapshotContext(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus) { + 
SnapshotContext(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus, long startTime) { super(snapshotId, Version.CURRENT, indexId, shard.shardId()); this.snapshotStatus = snapshotStatus; this.store = shard.store(); + this.startTime = startTime; } /** @@ -1125,24 +1113,25 @@ private class SnapshotContext extends Context { * * @param snapshotIndexCommit snapshot commit point */ - public void snapshot(IndexCommit snapshotIndexCommit) { + public void snapshot(final IndexCommit snapshotIndexCommit) { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, metadata.name()); - store.incRef(); + + final Map blobs; try { - final Map blobs; - try { - blobs = blobContainer.listBlobs(); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); - } + blobs = blobContainer.listBlobs(); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); + } - long generation = findLatestFileNameGeneration(blobs); - Tuple tuple = buildBlobStoreIndexShardSnapshots(blobs); - BlobStoreIndexShardSnapshots snapshots = tuple.v1(); - int fileListGeneration = tuple.v2(); + long generation = findLatestFileNameGeneration(blobs); + Tuple tuple = buildBlobStoreIndexShardSnapshots(blobs); + BlobStoreIndexShardSnapshots snapshots = tuple.v1(); + int fileListGeneration = tuple.v2(); - final List indexCommitPointFiles = new ArrayList<>(); + final List indexCommitPointFiles = new ArrayList<>(); + store.incRef(); + try { int indexNumberOfFiles = 0; long indexTotalFilesSize = 0; ArrayList filesToSnapshot = new ArrayList<>(); @@ -1156,10 +1145,11 @@ public void snapshot(IndexCommit snapshotIndexCommit) { throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e); } for (String fileName : fileNames) { - if (snapshotStatus.aborted()) { + if (snapshotStatus.isAborted()) { logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } + logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); final StoreFileMetaData md = metadata.get(fileName); BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; @@ -1195,14 +1185,7 @@ public void snapshot(IndexCommit snapshotIndexCommit) { } } - snapshotStatus.files(indexNumberOfFiles, indexTotalFilesSize); - - if (snapshotStatus.aborted()) { - logger.debug("[{}] [{}] Aborted during initialization", shardId, snapshotId); - throw new IndexShardSnapshotFailedException(shardId, "Aborted"); - } - - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.STARTED); + snapshotStatus.moveToStarted(startTime, indexNumberOfFiles, indexTotalFilesSize); for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { try { @@ -1211,36 +1194,42 @@ public void snapshot(IndexCommit snapshotIndexCommit) { throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); } } - - snapshotStatus.indexVersion(snapshotIndexCommit.getGeneration()); - // now create and write the commit point - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE); - - BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), - snapshotIndexCommit.getGeneration(), indexCommitPointFiles, snapshotStatus.startTime(), - // snapshotStatus.startTime() is assigned on the same machine, so it's safe to use with VLong - 
System.currentTimeMillis() - snapshotStatus.startTime(), indexNumberOfFiles, indexTotalFilesSize); - //TODO: The time stored in snapshot doesn't include cleanup time. - logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); - try { - indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getUUID()); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); - } - - // delete all files that are not referenced by any commit point - // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones - List newSnapshotsList = new ArrayList<>(); - newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - for (SnapshotFiles point : snapshots) { - newSnapshotsList.add(point); - } - // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index - finalize(newSnapshotsList, fileListGeneration + 1, blobs); - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE); } finally { store.decRef(); } + + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + + // now create and write the commit point + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), + lastSnapshotStatus.getIndexVersion(), + indexCommitPointFiles, + lastSnapshotStatus.getStartTime(), + // snapshotStatus.startTime() is assigned on the same machine, + // so it's safe to use with VLong + System.currentTimeMillis() - lastSnapshotStatus.getStartTime(), + lastSnapshotStatus.getNumberOfFiles(), + lastSnapshotStatus.getTotalSize()); + + //TODO: The time stored in snapshot doesn't include cleanup time. + logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); + try { + indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getUUID()); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); + } + + // delete all files that are not referenced by any commit point + // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones + List newSnapshotsList = new ArrayList<>(); + newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); + for (SnapshotFiles point : snapshots) { + newSnapshotsList.add(point); + } + // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index + finalize(newSnapshotsList, fileListGeneration + 1, blobs); + snapshotStatus.moveToDone(System.currentTimeMillis()); + } /** @@ -1335,7 +1324,7 @@ public int read(byte[] b, int off, int len) throws IOException { } private void checkAborted() { - if (snapshotStatus.aborted()) { + if (snapshotStatus.isAborted()) { logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 13b1b8ff1c6f9..065f0bf3f8e1b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -52,7 +52,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.engine.Engine; @@ -204,7 +203,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh Map shards = snapshotShards.getValue().shards; if (shards.containsKey(shardId)) { logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", shardId, snapshotShards.getKey().getSnapshotId()); - shards.get(shardId).abort(); + shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); } } } @@ -246,9 +245,7 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { // running shards is missed, then the snapshot is removed is a subsequent cluster // state update, which is being processed here for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().shards.values()) { - if (snapshotStatus.stage() == Stage.INIT || snapshotStatus.stage() == Stage.STARTED) { - snapshotStatus.abort(); - } + snapshotStatus.abortIfNotCompleted("snapshot has been removed in cluster state, aborting"); } } } @@ -272,7 +269,7 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { if (localNodeId.equals(shard.value.nodeId())) { if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.key))) { logger.trace("[{}] - Adding shard to the queue", shard.key); - startedShards.put(shard.key, new IndexShardSnapshotStatus()); + startedShards.put(shard.key, IndexShardSnapshotStatus.newInitializing()); } } } @@ -295,30 +292,26 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { // Abort all running shards for this snapshot SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot()); if (snapshotShards != null) { + final String failure = "snapshot has been aborted"; for (ObjectObjectCursor shard : entry.shards()) { IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.key); if (snapshotStatus != null) { - switch (snapshotStatus.stage()) { - case INIT: - case STARTED: - snapshotStatus.abort(); - break; - case FINALIZE: - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", entry.snapshot(), shard.key); - break; - case DONE: - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", entry.snapshot(), shard.key); - notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId, masterNode); - break; - case FAILURE: - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + - "updating status on the master", entry.snapshot(), shard.key); - notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, snapshotStatus.failure(), masterNode); - break; - default: - throw new IllegalStateException("Unknown snapshot shard stage " + snapshotStatus.stage()); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure); + final Stage stage = lastSnapshotStatus.getStage(); + if (stage == Stage.FINALIZE) { + logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + + "letting it finish", entry.snapshot(), shard.key); + + } else if (stage == Stage.DONE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + + "updating status on the master", entry.snapshot(), shard.key); 
+ notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId, masterNode); + + } else if (stage == Stage.FAILURE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + + "updating status on the master", entry.snapshot(), shard.key); + final String snapshotFailure = lastSnapshotStatus.getFailure(); + notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, snapshotFailure, masterNode); } } } @@ -418,12 +411,8 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina try (Engine.IndexCommitRef snapshotRef = indexShard.acquireIndexCommit(false, true)) { repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { - StringBuilder details = new StringBuilder(); - details.append(" index : version [").append(snapshotStatus.indexVersion()); - details.append("], number_of_files [").append(snapshotStatus.numberOfFiles()); - details.append("] with total_size [").append(new ByteSizeValue(snapshotStatus.totalSize())).append("]\n"); - logger.debug("snapshot ({}) completed to {}, took [{}]\n{}", snapshot, repository, - TimeValue.timeValueMillis(snapshotStatus.time()), details); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); + logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); } } } catch (SnapshotFailedEngineException | IndexShardSnapshotFailedException e) { @@ -454,18 +443,20 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { IndexShardSnapshotStatus localShardStatus = localShard.getValue(); ShardSnapshotStatus masterShard = masterShards.get(shardId); if (masterShard != null && masterShard.state().completed() == false) { + final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); + final Stage stage = indexShardSnapshotStatus.getStage(); // Master knows about the shard and thinks it has not completed - if (localShardStatus.stage() == Stage.DONE) { + if (stage == Stage.DONE) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId); notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localNodeId, masterNode); - } else if (localShard.getValue().stage() == Stage.FAILURE) { + } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId); - final String failure = localShardStatus.failure(); + final String failure = indexShardSnapshotStatus.getFailure(); notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, failure, masterNode); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 2a48d6a1b71bc..7230ac46e1306 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -607,10 +607,7 @@ public Map snapshotShards(final String reposi ShardId shardId = new ShardId(indexMetaData.getIndex(), i); SnapshotShardFailure shardFailure = 
findShardFailure(snapshotInfo.shardFailures(), shardId); if (shardFailure != null) { - IndexShardSnapshotStatus shardSnapshotStatus = new IndexShardSnapshotStatus(); - shardSnapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); - shardSnapshotStatus.failure(shardFailure.reason()); - shardStatus.put(shardId, shardSnapshotStatus); + shardStatus.put(shardId, IndexShardSnapshotStatus.newFailed(shardFailure.reason())); } else { final IndexShardSnapshotStatus shardSnapshotStatus; if (snapshotInfo.state() == SnapshotState.FAILED) { @@ -621,9 +618,7 @@ public Map snapshotShards(final String reposi // snapshot status will throw an exception. Instead, we create // a status for the shard to indicate that the shard snapshot // could not be taken due to partial being set to false. - shardSnapshotStatus = new IndexShardSnapshotStatus(); - shardSnapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); - shardSnapshotStatus.failure("skipped"); + shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped"); } else { shardSnapshotStatus = repository.getShardSnapshotStatus( snapshotInfo.snapshotId(), diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index 651cd96776e75..8431c8fa69f54 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -93,7 +93,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { assertBusy(() -> { final Snapshot snapshot = new Snapshot("test-repo", snapshotId); List stages = snapshotShardsService.currentSnapshotShards(snapshot) - .values().stream().map(IndexShardSnapshotStatus::stage).collect(Collectors.toList()); + .values().stream().map(status -> status.asCopy().getStage()).collect(Collectors.toList()); assertThat(stages, hasSize(shards)); assertThat(stages, everyItem(equalTo(IndexShardSnapshotStatus.Stage.DONE))); }); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index e7cb62b421282..2a5ef7e4aacd3 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -620,16 +620,18 @@ protected void recoverShardFromSnapshot(final IndexShard shard, protected void snapshotShard(final IndexShard shard, final Snapshot snapshot, final Repository repository) throws IOException { - final IndexShardSnapshotStatus snapshotStatus = new IndexShardSnapshotStatus(); + final IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); try (Engine.IndexCommitRef indexCommitRef = shard.acquireIndexCommit(false, true)) { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); repository.snapshotShard(shard, snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(), snapshotStatus); } - assertEquals(IndexShardSnapshotStatus.Stage.DONE, snapshotStatus.stage()); - assertEquals(shard.snapshotStoreMetadata().size(), snapshotStatus.numberOfFiles()); - assertNull(snapshotStatus.failure()); + + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); + assertEquals(IndexShardSnapshotStatus.Stage.DONE, lastSnapshotStatus.getStage()); + 
assertEquals(shard.snapshotStoreMetadata().size(), lastSnapshotStatus.getNumberOfFiles()); + assertNull(lastSnapshotStatus.getFailure()); } /** From d2ff1ae38b3a6bb64c3a1871c8bd388cd3be0f41 Mon Sep 17 00:00:00 2001 From: hanbj Date: Mon, 15 Jan 2018 22:09:27 +0800 Subject: [PATCH 13/31] [Docs] Fix an error in painless-types.asciidoc (#28221) --- docs/painless/painless-types.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index 36cf78312ea26..9e5077503b4a8 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -311,7 +311,7 @@ to floating point types. | int | explicit | explicit | explicit | | implicit | implicit | implicit | long | explicit | explicit | explicit | explicit | | implicit | implicit | float | explicit | explicit | explicit | explicit | explicit | | implicit -| float | explicit | explicit | explicit | explicit | explicit | explicit | +| double | explicit | explicit | explicit | explicit | explicit | explicit | |==== @@ -376,7 +376,7 @@ cast would normally be required between the non-def types. def x; // Declare def variable x and set it to null x = 3; // Set the def variable x to the literal 3 with an implicit // cast from int to def -double a = x; // Declare double variable y and set it to def variable x, +double a = x; // Declare double variable a and set it to def variable x, // which contains a double int b = x; // ERROR: Results in a run-time error because an explicit cast is // required to cast from a double to an int From 938b7b9594a658c260b805208c6cef9d1c5f322f Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 16:47:46 +0100 Subject: [PATCH 14/31] upgrade to lucene 7.2.1 (#28218) --- buildSrc/version.properties | 2 +- docs/Versions.asciidoc | 2 +- .../lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 | 1 + server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 | 1 - server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 | 1 + server/licenses/lucene-core-7.2.0.jar.sha1 | 1 - server/licenses/lucene-core-7.2.1.jar.sha1 | 1 + server/licenses/lucene-grouping-7.2.0.jar.sha1 | 1 - server/licenses/lucene-grouping-7.2.1.jar.sha1 | 1 + server/licenses/lucene-highlighter-7.2.0.jar.sha1 | 1 - server/licenses/lucene-highlighter-7.2.1.jar.sha1 | 1 + server/licenses/lucene-join-7.2.0.jar.sha1 | 1 - server/licenses/lucene-join-7.2.1.jar.sha1 | 1 + server/licenses/lucene-memory-7.2.0.jar.sha1 | 1 - server/licenses/lucene-memory-7.2.1.jar.sha1 | 1 + server/licenses/lucene-misc-7.2.0.jar.sha1 | 1 - 
server/licenses/lucene-misc-7.2.1.jar.sha1 | 1 + server/licenses/lucene-queries-7.2.0.jar.sha1 | 1 - server/licenses/lucene-queries-7.2.1.jar.sha1 | 1 + server/licenses/lucene-queryparser-7.2.0.jar.sha1 | 1 - server/licenses/lucene-queryparser-7.2.1.jar.sha1 | 1 + server/licenses/lucene-sandbox-7.2.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial3d-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial3d-7.2.1.jar.sha1 | 1 + server/licenses/lucene-suggest-7.2.0.jar.sha1 | 1 - server/licenses/lucene-suggest-7.2.1.jar.sha1 | 1 + server/src/main/java/org/elasticsearch/Version.java | 2 +- 47 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-core-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-join-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.2.1.jar.sha1 delete mode 100644 
server/licenses/lucene-sandbox-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.2.1.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 32d1c8a044321..605c3da1854d3 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 6.2.0 -lucene = 7.2.0 +lucene = 7.2.1 # optional dependencies spatial4j = 0.6 diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 71d816356b057..dfbbd4ac04bdc 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,6 +1,6 @@ :version: 6.2.0 :major-version: 6.x -:lucene_version: 7.2.0 +:lucene_version: 7.2.1 :lucene_version_path: 7_2_0 :branch: 6.x :jdk: 1.8.0_131 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 deleted file mode 100644 index 0e903acab596e..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -848eda48b43c30a7c7e38fa50182a7e866460e95 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..a57efa8c26aa6 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 @@ -0,0 +1 @@ +51fbb33cdb17bb36a0e86485685bba18eb1c2ccf \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 deleted file mode 100644 index 8c744b138d9b4..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -726e5cf3515ba765f5f326cdced8abaaa64da875 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..fb8e4b0167bf5 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 @@ -0,0 +1 @@ +cfdfcd54c052cdd08140c7cd4daa7929b9657da0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 deleted file mode 100644 index 72de0db978a26..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -879c63f60c20d9f0f2a106062ad2512158007108 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..f8c67b9480380 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 @@ -0,0 +1 @@ +21418892a16434ecb4f8efdbf4e62838f58a6a59 \ No 
newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 deleted file mode 100644 index fe98e5ed6ba59..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdf0ae30f09641d2c0b098c3b7a340d59a7ab4b1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..2443de6a49b0a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 @@ -0,0 +1 @@ +970e860a6e252e7c1dc117c45176a847ce961ffc \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 deleted file mode 100644 index e019470764969..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575096198d49aad52d2e12eb4d43dd547747dd7d \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..1c301d32445ec --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 @@ -0,0 +1 @@ +ec08375a8392720cc378995d8234cd6138a735f6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 deleted file mode 100644 index 83c0a09eed763..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0f748e15d3b6b8abbe654ba48ca7cbbebcfb98a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..4833879967b8e --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 @@ -0,0 +1 @@ +58305876f7fb0fbfad288910378cf4770da43892 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 deleted file mode 100644 index b7453ece71681..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -547938ebce6a7ea4308c4753e28c39d09e4c7423 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..dc33291c7a3cb --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 @@ -0,0 +1 @@ +51cf40e2606863840e52d7e8981314a5a0323e06 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 deleted file mode 100644 index 2ca17a5b5c1ab..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-4e1b4638fb8b07befc8175880641f821af3e655a \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..5ffdd6b7ba4cf --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 @@ -0,0 +1 @@ +324c3a090a04136720f4ef612db03b5c14866efa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 deleted file mode 100644 index f53f41fd9f865..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -35f5a26abb7fd466749fea7edfedae7897192e95 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..b166b97dd7c4d --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 @@ -0,0 +1 @@ +bc8dc9cc1555543532953d1dff33b67f849e19f9 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.0.jar.sha1 b/server/licenses/lucene-core-7.2.0.jar.sha1 deleted file mode 100644 index 41e1103ca2570..0000000000000 --- a/server/licenses/lucene-core-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f88107aa577ce8edc0a5cee036b485943107a552 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.1.jar.sha1 b/server/licenses/lucene-core-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e2fd2d7533737 --- /dev/null +++ b/server/licenses/lucene-core-7.2.1.jar.sha1 @@ -0,0 +1 @@ +91897dbbbbada95ccddbd90505f0a0ba6bf7c199 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.0.jar.sha1 b/server/licenses/lucene-grouping-7.2.0.jar.sha1 deleted file mode 100644 index 034534ffef35a..0000000000000 --- a/server/licenses/lucene-grouping-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1536a1a0fd24d0a8c03cfd45d00a52a88f9f52d1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.1.jar.sha1 b/server/licenses/lucene-grouping-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..7537cd21bf326 --- /dev/null +++ b/server/licenses/lucene-grouping-7.2.1.jar.sha1 @@ -0,0 +1 @@ +5dbae570b1a4e54cd978fe5c3ed2d6b2f87be968 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.0.jar.sha1 b/server/licenses/lucene-highlighter-7.2.0.jar.sha1 deleted file mode 100644 index f13d7cc8489bf..0000000000000 --- a/server/licenses/lucene-highlighter-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afd4093723520b0cdb59852018b545efeefd544a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..38837afb0a623 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 @@ -0,0 +1 @@ +2f4b8c93563409cfebb36d910c4dab4910678689 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.0.jar.sha1 b/server/licenses/lucene-join-7.2.0.jar.sha1 deleted file mode 100644 index 8cc521e31a007..0000000000000 --- a/server/licenses/lucene-join-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16029d54fa9c99b3187b68791b182a1ea4f78e89 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.1.jar.sha1 b/server/licenses/lucene-join-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..c2944aa323e2f --- /dev/null +++ b/server/licenses/lucene-join-7.2.1.jar.sha1 @@ -0,0 +1 @@ 
+3121a038d472f51087500dd6da9146a9b0031ae4 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.0.jar.sha1 b/server/licenses/lucene-memory-7.2.0.jar.sha1 deleted file mode 100644 index a267d12bd71ba..0000000000000 --- a/server/licenses/lucene-memory-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -32f26371224c595f625f061d67fc2edd9c8c836b \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.1.jar.sha1 b/server/licenses/lucene-memory-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..543e123b2a733 --- /dev/null +++ b/server/licenses/lucene-memory-7.2.1.jar.sha1 @@ -0,0 +1 @@ +21233b2baeed2aaa5acf8359bf8c4a90cc6bf553 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.0.jar.sha1 b/server/licenses/lucene-misc-7.2.0.jar.sha1 deleted file mode 100644 index d378ea1ae2cc2..0000000000000 --- a/server/licenses/lucene-misc-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1067351bfca1fc72ece5cb4a4f219762b097de36 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.1.jar.sha1 b/server/licenses/lucene-misc-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..2a9f649d7d527 --- /dev/null +++ b/server/licenses/lucene-misc-7.2.1.jar.sha1 @@ -0,0 +1 @@ +0478fed6c474c95f6c0c678c04297a3df0c1687e \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.0.jar.sha1 b/server/licenses/lucene-queries-7.2.0.jar.sha1 deleted file mode 100644 index 04b1048ee15dc..0000000000000 --- a/server/licenses/lucene-queries-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0b41af59bc2baed0315abb04621d62e500d094a \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.1.jar.sha1 b/server/licenses/lucene-queries-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e0f2d575e8a2a --- /dev/null +++ b/server/licenses/lucene-queries-7.2.1.jar.sha1 @@ -0,0 +1 @@ +02135cf5047409ed1ca6cd098e802b30f9dbd1ff \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.0.jar.sha1 b/server/licenses/lucene-queryparser-7.2.0.jar.sha1 deleted file mode 100644 index bedb4fbd1448b..0000000000000 --- a/server/licenses/lucene-queryparser-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a17128e35e5e924cf28c283415d83c7a8935e58 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..56c5dbfa18678 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 @@ -0,0 +1 @@ +a87d8b14d1c8045f61cb704955706f6681170be3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.2.0.jar.sha1 b/server/licenses/lucene-sandbox-7.2.0.jar.sha1 deleted file mode 100644 index 62704a0258e92..0000000000000 --- a/server/licenses/lucene-sandbox-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fa77169831ec17636357b55bd2c8ca5a97ec7a2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..9445acbdd87d8 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 @@ -0,0 +1 @@ +dc8dd132fd183791dc27591a69974f55b685d0d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.0.jar.sha1 b/server/licenses/lucene-spatial-7.2.0.jar.sha1 deleted file mode 100644 index adcb3b8de7603..0000000000000 --- a/server/licenses/lucene-spatial-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575f7507d526b2692ae461a4df349e90f048ec77 \ No newline at end of file diff --git 
a/server/licenses/lucene-spatial-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..8c1b3d01c2339 --- /dev/null +++ b/server/licenses/lucene-spatial-7.2.1.jar.sha1 @@ -0,0 +1 @@ +09c4d96e6ea34292f7cd20c4ff1d16ff31eb7869 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 deleted file mode 100644 index b9c4e84c78eb0..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f6e31d08dc86bb3edeb6ef132f0920941735e15 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..50422956651d3 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 @@ -0,0 +1 @@ +8aff7e8a5547c03d0c4e7e1b58cb30773bb1d7d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 deleted file mode 100644 index 225d318bcda9d..0000000000000 --- a/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f857630bfafde418e6e3cf748fe8d18f7b771a70 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..85aae1cfdd053 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 @@ -0,0 +1 @@ +8b0db8ff795b31994ebe93779c450d17c612590d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.0.jar.sha1 b/server/licenses/lucene-suggest-7.2.0.jar.sha1 deleted file mode 100644 index f99189e7b9aae..0000000000000 --- a/server/licenses/lucene-suggest-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0409ce8d0d7e1203143b5be41aa6dd31d4c1bcf9 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.1.jar.sha1 b/server/licenses/lucene-suggest-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e46240d1c6287 --- /dev/null +++ b/server/licenses/lucene-suggest-7.2.1.jar.sha1 @@ -0,0 +1 @@ +1c3804602e35589c21b0391fa7088ef012751a22 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 3db2a6fea9fbb..fa9741304fa47 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -139,7 +139,7 @@ public class Version implements Comparable { public static final int V_6_1_2_ID = 6010299; public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); + public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final Version CURRENT = V_6_2_0; static { From 9334fa0fe1fe947d3cd52b0b9b71886f363dc1f4 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 15 Jan 2018 09:59:01 -0700 Subject: [PATCH 15/31] Introduce elasticsearch-core jar (#28191) This is related to #27933. It introduces a jar named elasticsearch-core in the lib directory. This commit moves the JarHell class from server to elasticsearch-core. Additionally, PathUtils and some of Loggers are moved as JarHell depends on them. 
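The core job of the relocated JarHell class is duplicate-class detection across the jars on the classpath. The following standalone sketch illustrates only that idea; the class name and command-line interface are invented for illustration, and it is not the JarHell implementation being moved in this patch.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

// Minimal sketch of the "jar hell" idea: the same class name resolvable from two
// different jars on the classpath is almost always a packaging mistake.
public class JarHellSketch {
    public static void main(String[] args) throws IOException {
        Map<String, Path> seen = new HashMap<>();
        for (String arg : args) {
            Path jarPath = Paths.get(arg);
            try (JarFile jar = new JarFile(jarPath.toFile())) {
                Enumeration<JarEntry> entries = jar.entries();
                while (entries.hasMoreElements()) {
                    String name = entries.nextElement().getName();
                    if (name.endsWith(".class") == false || name.equals("module-info.class")) {
                        continue; // only class files can clash
                    }
                    Path previous = seen.put(name, jarPath);
                    if (previous != null && previous.equals(jarPath) == false) {
                        throw new IllegalStateException(
                            "class " + name + " found in both " + previous + " and " + jarPath);
                    }
                }
            }
        }
    }
}

Feeding it two jars that bundle the same class fails fast with both offending paths, which is the packaging mistake the real check guards against at node startup.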
--- build.gradle | 1 + client/rest/build.gradle | 1 + client/sniffer/build.gradle | 1 + client/test/build.gradle | 1 + libs/elasticsearch-core/build.gradle | 81 +++++++ .../licenses/log4j-api-2.9.1.jar.sha1 | 1 + .../licenses/log4j-api-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/log4j-api-NOTICE.txt | 5 + .../org/elasticsearch/bootstrap/JarHell.java | 19 +- .../elasticsearch/bootstrap/JavaVersion.java | 1 + .../common/SuppressForbidden.java | 0 .../elasticsearch/common/io/PathUtils.java | 0 .../common/logging/ESLoggerFactory.java | 9 - .../elasticsearch/common/logging/Loggers.java | 69 ++++++ .../common/logging/PrefixLogger.java | 2 +- .../elasticsearch/bootstrap/JarHellTests.java | 31 +-- .../bootstrap/duplicate-classes.jar | Bin .../bootstrap/duplicate-xmlbeans-classes.jar | Bin .../transport/netty4/ESLoggingHandlerIT.java | 5 +- .../logging/EvilLoggerConfigurationTests.java | 2 +- .../common/logging/EvilLoggerTests.java | 6 +- server/build.gradle | 2 + .../org/elasticsearch/action/bulk/Retry.java | 4 +- .../elasticsearch/bootstrap/Bootstrap.java | 11 +- .../common/component/AbstractComponent.java | 4 +- .../common/logging/LogConfigurator.java | 12 +- .../{Loggers.java => ServerLoggers.java} | 61 +----- .../common/settings/ClusterSettings.java | 18 +- .../common/settings/SettingsModule.java | 5 +- .../discovery/DiscoveryModule.java | 4 +- .../elasticsearch/env/NodeEnvironment.java | 8 +- .../index/AbstractIndexComponent.java | 4 +- .../index/CompositeIndexEventListener.java | 4 +- .../elasticsearch/index/IndexSettings.java | 4 +- .../elasticsearch/index/IndexingSlowLog.java | 6 +- .../elasticsearch/index/SearchSlowLog.java | 10 +- .../index/analysis/AnalysisRegistry.java | 3 +- ...ElasticsearchConcurrentMergeScheduler.java | 4 +- .../elasticsearch/index/engine/Engine.java | 4 +- .../plain/DocValuesIndexFieldData.java | 2 - .../RandomScoreFunctionBuilder.java | 2 +- .../shard/AbstractIndexShardComponent.java | 4 +- .../org/elasticsearch/index/store/Store.java | 4 +- .../recovery/RecoverySourceHandler.java | 4 +- .../indices/recovery/RecoveryTarget.java | 4 +- .../java/org/elasticsearch/node/Node.java | 4 +- .../bucket/terms/TermsAggregatorFactory.java | 4 +- .../bootstrap/MaxMapCountCheckTests.java | 10 +- .../cluster/allocation/ClusterRerouteIT.java | 9 +- .../metadata/TemplateUpgradeServiceIT.java | 4 +- .../ExpectedShardSizeAllocationTests.java | 1 - .../allocation/FailedNodeRoutingTests.java | 6 - .../allocation/RebalanceAfterActiveTests.java | 1 - .../service/ClusterApplierServiceTests.java | 10 +- .../cluster/service/MasterServiceTests.java | 9 +- .../common/settings/ScopedSettingsTests.java | 8 +- .../gateway/GatewayIndexStateIT.java | 3 - .../index/MergeSchedulerSettingsTests.java | 17 +- .../index/engine/InternalEngineTests.java | 21 +- settings.gradle | 1 + .../index/store/EsBaseDirectoryTestCase.java | 1 - .../org/elasticsearch/test/TestCluster.java | 1 - .../test/engine/MockEngineSupport.java | 1 - .../test/junit/listeners/LoggingListener.java | 5 +- .../elasticsearch/test/rest/yaml/Stash.java | 1 - .../test/store/MockFSIndexStore.java | 4 +- 66 files changed, 509 insertions(+), 237 deletions(-) create mode 100644 libs/elasticsearch-core/build.gradle create mode 100644 libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 create mode 100644 libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt create mode 100644 libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/bootstrap/JarHell.java 
(94%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java (99%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/SuppressForbidden.java (100%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/io/PathUtils.java (100%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java (80%) create mode 100644 libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java (98%) rename {server => libs/elasticsearch-core}/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java (88%) rename {server => libs/elasticsearch-core}/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar (100%) rename {server => libs/elasticsearch-core}/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar (100%) rename server/src/main/java/org/elasticsearch/common/logging/{Loggers.java => ServerLoggers.java} (76%) diff --git a/build.gradle b/build.gradle index c4ef2b3588029..b48b473b07615 100644 --- a/build.gradle +++ b/build.gradle @@ -183,6 +183,7 @@ subprojects { "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':server', "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', + "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level', diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 1c7e86f799f61..8e0f179634a27 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -72,6 +72,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false namingConventions { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index bcde806f4df16..03e4a082d274c 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -75,6 +75,7 @@ dependencyLicenses { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false namingConventions { diff --git a/client/test/build.gradle b/client/test/build.gradle index ccc7be81466a4..fd5777cc8df3f 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -49,6 +49,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false // TODO: should we have licenses for our test deps? diff --git a/libs/elasticsearch-core/build.gradle b/libs/elasticsearch-core/build.gradle new file mode 100644 index 0000000000000..4cbee03649bb7 --- /dev/null +++ b/libs/elasticsearch-core/build.gradle @@ -0,0 +1,81 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.optional-base' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +archivesBaseName = 'elasticsearch-core' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + + if (isEclipse == false || project.path == ":libs:elasticsearch-core-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-core' + } + } +} + +forbiddenApisMain { + // elasticsearch-core does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:elasticsearch-core") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // from log4j + 'org/osgi/framework/AdaptPermission', + 'org/osgi/framework/AdminPermission', + 'org/osgi/framework/Bundle', + 'org/osgi/framework/BundleActivator', + 'org/osgi/framework/BundleContext', + 'org/osgi/framework/BundleEvent', + 'org/osgi/framework/SynchronousBundleListener', + 'org/osgi/framework/wiring/BundleWire', + 'org/osgi/framework/wiring/BundleWiring' +] \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..e1a89fadfed95 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 @@ -0,0 +1 @@ +7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
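The JarHell hunks in the diff that follows rewrap the version-format error message and drop the X-Compile-Elasticsearch-Version manifest check. As context for the rule that message describes, here is a minimal sketch of such a version-format validation; the regex is an illustrative approximation, not necessarily the exact pattern JarHell compiles.

import java.util.regex.Pattern;

// Illustrative approximation of the rule quoted in the JarHell diff below: a version
// string must be dot-separated nonnegative decimal integers, leading zeros allowed.
public class VersionFormatSketch {

    private static final Pattern VALID_VERSION = Pattern.compile("[0-9]+(\\.[0-9]+)*");

    static void checkVersionFormat(String targetVersion) {
        if (VALID_VERSION.matcher(targetVersion).matches() == false) {
            throw new IllegalStateException(
                "version string must be a sequence of nonnegative decimal integers separated "
                    + "by \".\"'s and may have leading zeros but was " + targetVersion);
        }
    }

    public static void main(String[] args) {
        checkVersionFormat("1.8.0"); // passes
        try {
            checkVersionFormat("bogus"); // rejected
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}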
diff --git a/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java similarity index 94% rename from server/src/main/java/org/elasticsearch/bootstrap/JarHell.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 1959e5e81394b..0e5c9597b7ec8 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -20,7 +20,6 @@ package org.elasticsearch.bootstrap; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.Loggers; @@ -120,7 +119,8 @@ static Set parseClassPath(String classPath) { // } // Instead we just throw an exception, and keep it clean. if (element.isEmpty()) { - throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous version?) classpath='" + classPath + "'"); + throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous" + + " version?) classpath='" + classPath + "'"); } // we should be able to just Paths.get() each element, but unfortunately this is not the // whole story on how classpath parsing works: if you want to know, start at sun.misc.Launcher, @@ -215,21 +215,13 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO } /** inspect manifest for sure incompatibilities */ - static void checkManifest(Manifest manifest, Path jar) { + private static void checkManifest(Manifest manifest, Path jar) { // give a nice error if jar requires a newer java version String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK"); if (targetVersion != null) { checkVersionFormat(targetVersion); checkJavaVersion(jar.toString(), targetVersion); } - - // give a nice error if jar is compiled against different es version - String systemESVersion = Version.CURRENT.toString(); - String targetESVersion = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); - if (targetESVersion != null && targetESVersion.equals(systemESVersion) == false) { - throw new IllegalStateException(jar + " requires Elasticsearch " + targetESVersion - + ", your system: " + systemESVersion); - } } public static void checkVersionFormat(String targetVersion) { @@ -237,7 +229,8 @@ public static void checkVersionFormat(String targetVersion) { throw new IllegalStateException( String.format( Locale.ROOT, - "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s", + "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have " + + "leading zeros but was %s", targetVersion ) ); @@ -263,7 +256,7 @@ public static void checkJavaVersion(String resource, String targetVersion) { } } - static void checkClass(Map 
<String, Path> clazzes, String clazz, Path jarpath) {
+    private static void checkClass(Map<String, Path> clazzes, String clazz, Path jarpath) {
         Path previous = clazzes.put(clazz, jarpath);
         if (previous != null) {
             if (previous.equals(jarpath)) {
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java
similarity index 99%
rename from server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java
rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java
index 03722e03060a7..f22087c6e7d8d 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java
@@ -26,6 +26,7 @@ import java.util.stream.Collectors;
 
 public class JavaVersion implements Comparable<JavaVersion> {
+
     private final List<Integer> version;
 
     public List<Integer> getVersion() {
diff --git a/server/src/main/java/org/elasticsearch/common/SuppressForbidden.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java
similarity index 100%
rename from server/src/main/java/org/elasticsearch/common/SuppressForbidden.java
rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java
diff --git a/server/src/main/java/org/elasticsearch/common/io/PathUtils.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java
similarity index 100%
rename from server/src/main/java/org/elasticsearch/common/io/PathUtils.java
rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
similarity index 80%
rename from server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
index d8f2ebe9be843..44d7d17b59325 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
@@ -19,12 +19,9 @@
 
 package org.elasticsearch.common.logging;
 
-import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.spi.ExtendedLogger;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Property;
 
 /**
  * Factory to get {@link Logger}s
@@ -35,12 +32,6 @@ private ESLoggerFactory() {
 
     }
 
-    public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
-        new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);
-    public static final Setting.AffixSetting<Level> LOG_LEVEL_SETTING =
-        Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Property.Dynamic,
-            Property.NodeScope));
-
     public static Logger getLogger(String prefix, String name) {
         return getLogger(prefix, LogManager.getLogger(name));
     }
diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java
new file mode 100644
index 0000000000000..89073bdce54c4
--- /dev/null
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.Logger;
+
+public class Loggers {
+
+    public static final String SPACE = " ";
+
+    public static Logger getLogger(Logger parentLogger, String s) {
+        assert parentLogger instanceof PrefixLogger;
+        return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
+    }
+
+    public static Logger getLogger(String s) {
+        return ESLoggerFactory.getLogger(s);
+    }
+
+    public static Logger getLogger(Class<?> clazz) {
+        return ESLoggerFactory.getLogger(clazz);
+    }
+
+    public static Logger getLogger(Class<?> clazz, String... prefixes) {
+        return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
+    }
+
+    public static Logger getLogger(String name, String... prefixes) {
+        return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
+    }
+
+    private static String formatPrefix(String... prefixes) {
+        String prefix = null;
+        if (prefixes != null && prefixes.length > 0) {
+            StringBuilder sb = new StringBuilder();
+            for (String prefixX : prefixes) {
+                if (prefixX != null) {
+                    if (prefixX.equals(SPACE)) {
+                        sb.append(" ");
+                    } else {
+                        sb.append("[").append(prefixX).append("]");
+                    }
+                }
+            }
+            if (sb.length() > 0) {
+                sb.append(" ");
+                prefix = sb.toString();
+            }
+        }
+        return prefix;
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
similarity index 98%
rename from server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
index a78330c3e8564..b24e839690366 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java
@@ -32,7 +32,7 @@
 * A logger that prefixes all messages with a fixed prefix specified during construction. The prefix mechanism uses the marker construct, so
 * for the prefixes to appear, the logging layout pattern must include the marker in its pattern.
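To make the behavior of formatPrefix in the new Loggers class concrete: every non-null prefix is wrapped in square brackets and, if anything was appended, a single trailing space is added, so a logger created with prefixes "my-index" and "0" emits lines prefixed with "[my-index][0] ". The following is a self-contained re-implementation of just that formatting for experimentation outside the Elasticsearch code base; it is a sketch, not the class above.

// Standalone demo of the prefix formatting implemented by Loggers.formatPrefix.
public class PrefixDemo {

    static String formatPrefix(String... prefixes) {
        if (prefixes == null || prefixes.length == 0) {
            return null;
        }
        StringBuilder sb = new StringBuilder();
        for (String prefix : prefixes) {
            if (prefix != null) {
                // a bare " " prefix (Loggers.SPACE) passes through; everything else is bracketed
                sb.append(prefix.equals(" ") ? " " : "[" + prefix + "]");
            }
        }
        return sb.length() > 0 ? sb.append(" ").toString() : null;
    }

    public static void main(String[] args) {
        // Prints "[my-index][0] " — the marker a PrefixLogger prepends to every message.
        System.out.println(formatPrefix("my-index", "0"));
    }
}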
*/ -class PrefixLogger extends ExtendedLoggerWrapper { +public class PrefixLogger extends ExtendedLoggerWrapper { /* * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker; diff --git a/server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java similarity index 88% rename from server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java rename to libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index 7003ef3d81efe..b3dee0b004584 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.bootstrap; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.ESTestCase; @@ -164,7 +163,8 @@ public void testBadJDKVersionInJar() throws Exception { JarHell.checkJarHell(jars); fail("did not get expected exception"); } catch (IllegalStateException e) { - assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was bogus")); + assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated " + + "by \".\"'s and may have leading zeros but was bogus")); } } @@ -178,33 +178,6 @@ public void testRequiredJDKVersionIsOK() throws Exception { JarHell.checkJarHell(jars); } - /** make sure if a plugin is compiled against the same ES version, it works */ - public void testGoodESVersionInJar() throws Exception { - Path dir = createTempDir(); - Manifest manifest = new Manifest(); - Attributes attributes = manifest.getMainAttributes(); - attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); - attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), Version.CURRENT.toString()); - Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - JarHell.checkJarHell(jars); - } - - /** make sure if a plugin is compiled against a different ES version, it fails */ - public void testBadESVersionInJar() throws Exception { - Path dir = createTempDir(); - Manifest manifest = new Manifest(); - Attributes attributes = manifest.getMainAttributes(); - attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); - attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), "1.0-bogus"); - Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - try { - JarHell.checkJarHell(jars); - fail("did not get expected exception"); - } catch (IllegalStateException e) { - assertTrue(e.getMessage().contains("requires Elasticsearch 1.0-bogus")); - } - } - public void testValidVersions() { String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"}; for (String version : versions) { diff --git a/server/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar b/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar rename to libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar diff --git a/server/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar 
b/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar rename to libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index acd71749e2333..67368cb577a81 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -36,12 +37,12 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + ServerLoggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + ServerLoggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.stop(); super.tearDown(); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index f53c9d3b1f5e7..8dab47bd1ceee 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -138,7 +138,7 @@ public void testHierarchy() throws Exception { assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(Level.DEBUG)); final Level level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); - Loggers.setLevel(ESLoggerFactory.getLogger("x"), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger("x"), level); assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(level)); assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index d4bc754689e68..55e359697eb15 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -285,12 +285,12 @@ public void testFindAppender() throws IOException, UserException { final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender"); - final Appender testLoggerConsoleAppender = Loggers.findAppender(hasConsoleAppender, ConsoleAppender.class); + final Appender testLoggerConsoleAppender = ServerLoggers.findAppender(hasConsoleAppender, ConsoleAppender.class); 
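The setUp/tearDown changes above keep the long-standing test pattern of attaching a capturing appender before the code under test runs and detaching it afterwards; only the helper performing the attach moved to ServerLoggers. A rough standalone sketch of the same pattern against the raw log4j 2.x core API follows; it assumes log4j-core is the bound logging implementation, and the class and appender names are invented.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.Logger;
import org.apache.logging.log4j.core.appender.AbstractAppender;

import java.util.ArrayList;
import java.util.List;

// Sketch of the attach/detach pattern used in the test diffs above.
public class CapturingAppenderDemo {

    static final class CapturingAppender extends AbstractAppender {
        final List<String> messages = new ArrayList<>();

        CapturingAppender() {
            super("capturing", null, null, true);
        }

        @Override
        public void append(LogEvent event) {
            messages.add(event.getMessage().getFormattedMessage());
        }
    }

    public static void main(String[] args) {
        Logger logger = (Logger) LogManager.getLogger("demo");
        logger.setLevel(Level.TRACE);
        CapturingAppender appender = new CapturingAppender();
        appender.start();                    // appenders must be started before use
        logger.addAppender(appender);        // setUp: attach
        try {
            logger.trace("hello");           // the code under test logs something
        } finally {
            logger.removeAppender(appender); // tearDown: detach
            appender.stop();
        }
        System.out.println(appender.messages); // [hello]
    }
}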
assertNotNull(testLoggerConsoleAppender); assertThat(testLoggerConsoleAppender.getName(), equalTo("console")); final Logger hasCountingNoOpAppender = ESLoggerFactory.getLogger("has_counting_no_op_appender"); - assertNull(Loggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); - final Appender countingNoOpAppender = Loggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); + assertNull(ServerLoggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); + final Appender countingNoOpAppender = ServerLoggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op")); } diff --git a/server/build.gradle b/server/build.gradle index 20693a30c0cec..4f69c2ee159b5 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -38,6 +38,8 @@ archivesBaseName = 'elasticsearch' dependencies { + compile "org.elasticsearch:elasticsearch-core:${version}" + compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 9985d23b9badb..b173fc074bd82 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -22,7 +22,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -102,7 +102,7 @@ static class RetryHandler implements ActionListener { this.backoff = backoffPolicy.iterator(); this.consumer = consumer; this.listener = listener; - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.scheduler = scheduler; // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood this.startTimestampNanos = System.nanoTime(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 30b9fb7e28dd0..410b060200bc6 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.common.settings.KeyStoreWrapper; @@ -303,9 +304,9 @@ static void init( try { if (closeStandardStreams) { final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); if (maybeConsoleAppender != null) { - Loggers.removeAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); } closeSystOut(); } @@ -336,9 +337,9 @@ static void init( } 
catch (NodeValidationException | RuntimeException e) { // disable console logging, so user does not see the exception twice (jvm will show it already) final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); if (foreground && maybeConsoleAppender != null) { - Loggers.removeAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); } Logger logger = Loggers.getLogger(Bootstrap.class); if (INSTANCE.node != null) { @@ -371,7 +372,7 @@ static void init( } // re-enable it if appropriate, so they can see any logging during the shutdown process if (foreground && maybeConsoleAppender != null) { - Loggers.addAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.addAppender(rootLogger, maybeConsoleAppender); } throw e; diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java index 8cb51f2b06b0e..f335a754f3771 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; @@ -34,7 +34,7 @@ public abstract class AbstractComponent { protected final Settings settings; public AbstractComponent(Settings settings) { - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.deprecationLogger = new DeprecationLogger(logger); this.settings = settings; } diff --git a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index b97fc13e73038..b38c3d3bdd78e 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -177,15 +177,15 @@ private static void configureStatusLogger() { * @param settings the settings from which logger levels will be extracted */ private static void configureLoggerLevels(final Settings settings) { - if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { - final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings); - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + if (ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { + final Level level = ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); } - ESLoggerFactory.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) + ServerLoggers.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) // do not set a log level for a logger named level (from the default log setting) - .filter(s -> s.getKey().equals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { + .filter(s -> s.getKey().equals(ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { final Level level = s.get(settings); - 
Loggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level); }); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java similarity index 76% rename from server/src/main/java/org/elasticsearch/common/logging/Loggers.java rename to server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java index 812a0b70f2877..99049c53d1637 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java @@ -27,28 +27,29 @@ import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.core.config.LoggerConfig; -import org.apache.logging.log4j.message.MessageFactory; -import org.elasticsearch.common.Classes; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.Node; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.Map; import static java.util.Arrays.asList; -import static javax.security.auth.login.Configuration.getConfiguration; import static org.elasticsearch.common.util.CollectionUtils.asArrayList; /** * A set of utilities around Logging. */ -public class Loggers { +public class ServerLoggers { - public static final String SPACE = " "; + public static final Setting LOG_DEFAULT_LEVEL_SETTING = + new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Setting.Property.NodeScope); + public static final Setting.AffixSetting LOG_LEVEL_SETTING = + Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic, + Setting.Property.NodeScope)); public static Logger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) { return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); @@ -64,17 +65,17 @@ public static Logger getLogger(String loggerName, Settings settings, ShardId sha } public static Logger getLogger(Class clazz, Settings settings, Index index, String... prefixes) { - return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0])); + return getLogger(clazz, settings, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0])); } public static Logger getLogger(Class clazz, Settings settings, String... prefixes) { final List prefixesList = prefixesList(settings, prefixes); - return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()])); + return Loggers.getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()])); } public static Logger getLogger(String loggerName, Settings settings, String... prefixes) { final List prefixesList = prefixesList(settings, prefixes); - return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()])); + return Loggers.getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()])); } private static List prefixesList(Settings settings, String... prefixes) { @@ -88,48 +89,6 @@ private static List prefixesList(Settings settings, String... 
prefixes) return prefixesList; } - public static Logger getLogger(Logger parentLogger, String s) { - assert parentLogger instanceof PrefixLogger; - return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s); - } - - public static Logger getLogger(String s) { - return ESLoggerFactory.getLogger(s); - } - - public static Logger getLogger(Class<?> clazz) { - return ESLoggerFactory.getLogger(clazz); - } - - public static Logger getLogger(Class<?> clazz, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); - } - - public static Logger getLogger(String name, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), name); - } - - private static String formatPrefix(String... prefixes) { - String prefix = null; - if (prefixes != null && prefixes.length > 0) { - StringBuilder sb = new StringBuilder(); - for (String prefixX : prefixes) { - if (prefixX != null) { - if (prefixX.equals(SPACE)) { - sb.append(" "); - } else { - sb.append("[").append(prefixX).append("]"); - } - } - } - if (sb.length() > 0) { - sb.append(" "); - prefix = sb.toString(); - } - } - return prefix; - } - /** * Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null * level. diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index db8dd461dd737..aec14415db3fc 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -46,7 +46,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting.Property; @@ -111,7 +111,7 @@ public ClusterSettings(Settings nodeSettings, Set<Setting<?>> settingsSet) { } private static final class LoggingSettingUpdater implements SettingUpdater<Settings> { - final Predicate<String> loggerPredicate = ESLoggerFactory.LOG_LEVEL_SETTING::match; + final Predicate<String> loggerPredicate = ServerLoggers.LOG_LEVEL_SETTING::match; private final Settings settings; LoggingSettingUpdater(Settings settings) { @@ -129,10 +129,10 @@ public Settings getValue(Settings current, Settings previous) { builder.put(current.filter(loggerPredicate)); for (String key : previous.keySet()) { if (loggerPredicate.test(key) && builder.keys().contains(key) == false) { - if (ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) { + if (ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) { builder.putNull(key); } else { - builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString()); + builder.put(key, ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString()); } } } @@ -150,12 +150,12 @@ public void apply(Settings value, Settings current, Settings previous) { if ("_root".equals(component)) { final String rootLevel = value.get(key); if (rootLevel == null) { - Loggers.setLevel(ESLoggerFactory.getRootLogger(),
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings)); } else { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); } } else { - Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); + ServerLoggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); } } } @@ -379,8 +379,8 @@ public void apply(Settings value, Settings current, Settings previous) { ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, EsExecutors.PROCESSORS_SETTING, ThreadContext.DEFAULT_HEADERS_SETTING, - ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING, - ESLoggerFactory.LOG_LEVEL_SETTING, + ServerLoggers.LOG_DEFAULT_LEVEL_SETTING, + ServerLoggers.LOG_LEVEL_SETTING, NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, OsService.REFRESH_INTERVAL_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 0304b20e992e5..20253f7876880 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -35,7 +35,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -58,7 +57,7 @@ public SettingsModule(Settings settings, Setting... 
additionalSettings) { } public SettingsModule(Settings settings, List> additionalSettings, List settingsFilter) { - logger = Loggers.getLogger(getClass(), settings); + logger = ServerLoggers.getLogger(getClass(), settings); this.settings = settings; for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { registerSetting(setting); diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 179692cd516c8..b2602e8f2c596 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -109,7 +109,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic if (discoverySupplier == null) { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } - Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); + ServerLoggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); discovery = Objects.requireNonNull(discoverySupplier.get()); } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 9c58335117407..ca4ea2e37549b 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -182,7 +182,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce locks = null; nodeLockId = -1; nodeMetaData = new NodeMetaData(generateNodeId(settings)); - logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); + logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); return; } final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length]; @@ -190,7 +190,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce boolean success = false; // trace logger to debug issues before the default node name is derived from the node id - Logger startupTraceLogger = Loggers.getLogger(getClass(), settings); + Logger startupTraceLogger = ServerLoggers.getLogger(getClass(), settings); try { sharedDataPath = environment.sharedDataFile(); @@ -244,7 +244,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce throw new IllegalStateException(message, lastException); } this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths); 
- this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); + this.logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); this.nodeLockId = nodeLockId; this.locks = locks; diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index 25acdd06b44a6..ce13c12c8496f 100644 --- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; public abstract class AbstractIndexComponent implements IndexComponent { @@ -33,7 +33,7 @@ public abstract class AbstractIndexComponent implements IndexComponent { * Constructs a new index component, with the index name and its settings. */ protected AbstractIndexComponent(IndexSettings indexSettings) { - this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); this.deprecationLogger = new DeprecationLogger(logger); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 90d8a205e8b57..e50ddd8e3966c 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -24,7 +24,7 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -52,7 +52,7 @@ final class CompositeIndexEventListener implements IndexEventListener { } } this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners)); - this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index c8de2400aeb88..45b805798c105 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.MergePolicy; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -375,7 +375,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti this.settings = 
Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build(); this.index = indexMetaData.getIndex(); version = Version.indexCreated(settings); - logger = Loggers.getLogger(getClass(), settings, index); + logger = ServerLoggers.getLogger(getClass(), settings, index); nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 94c3892ef361e..53d63bf64bb6b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -87,7 +87,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { - this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.indexLogger = ServerLoggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); @@ -117,7 +117,7 @@ private void setMaxSourceCharsToLog(int maxSourceCharsToLog) { private void setLevel(SlowLogLevel level) { this.level = level; - Loggers.setLevel(this.indexLogger, level.name()); + ServerLoggers.setLevel(this.indexLogger, level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index a48e3d7bd72c5..d02d4820fd402 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -81,8 +81,8 @@ public final class SearchSlowLog implements SearchOperationListener { public SearchSlowLog(IndexSettings indexSettings) { - this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); - this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); + this.queryLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); + this.fetchLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); this.queryWarnThreshold = 
indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); @@ -108,8 +108,8 @@ public SearchSlowLog(IndexSettings indexSettings) { private void setLevel(SlowLogLevel level) { this.level = level; - Loggers.setLevel(queryLogger, level.name()); - Loggers.setLevel(fetchLogger, level.name()); + ServerLoggers.setLevel(queryLogger, level.name()); + ServerLoggers.setLevel(fetchLogger, level.name()); } @Override public void onQueryPhase(SearchContext context, long tookInNanos) { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index ec8d17929a31f..f4c655af48b5e 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.Environment; @@ -456,7 +457,7 @@ public IndexAnalyzers build(IndexSettings indexSettings, Index index = indexSettings.getIndex(); analyzerProviders = new HashMap<>(analyzerProviders); - Logger logger = Loggers.getLogger(getClass(), indexSettings.getSettings()); + Logger logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings()); DeprecationLogger deprecationLogger = new DeprecationLogger(logger); Map analyzerAliases = new HashMap<>(); Map analyzers = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index f4876149cac13..871f1f62f41be 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.OneMergeHelper; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; @@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { this.config = indexSettings.getMergeSchedulerConfig(); this.shardId = shardId; this.indexSettings = indexSettings.getSettings(); - this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId); + this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings, shardId); refreshConfig(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1fbe17b752e2c..268a3b6db17f2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -51,7 +51,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.Loggers; +import 
org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -130,7 +130,7 @@ protected Engine(EngineConfig engineConfig) { this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); - this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name + this.logger = ServerLoggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 698b289d758be..2384e34732040 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -19,10 +19,8 @@ package org.elasticsearch.index.fielddata.plain; -import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.SortedSetDocValues; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexFieldData; diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index d3d9ffa481871..d7ce32d9b7628 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -120,7 +120,7 @@ public Integer getSeed() { /** * Set the field to be used for random number generation. This parameter is compulsory * when a {@link #seed(int) seed} is set and ignored otherwise. Note that documents that - * have the same value for a field will get the same score. + * have the same value for a field will get the same score. 
*/ public RandomScoreFunctionBuilder setField(String field) { this.field = field; diff --git a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java index 0e46a562488d3..1d02c33dd3e1b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.index.IndexSettings; public abstract class AbstractIndexShardComponent implements IndexShardComponent { @@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) { this.shardId = shardId; this.indexSettings = indexSettings; - this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); + this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); this.deprecationLogger = new DeprecationLogger(logger); } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index dab39c26a3c5b..74be98b813238 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -58,7 +58,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; @@ -159,7 +159,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); - this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId)); + this.directory = new StoreDirectory(directoryService.newDirectory(), ServerLoggers.getLogger("index.store.deletes", settings, shardId)); this.shardLock = shardLock; this.onClose = onClose; final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3ee9b953757c3..5a0ee1cf44d07 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; 
import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -120,7 +120,7 @@ public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recov this.recoveryTarget = recoveryTarget; this.request = request; this.shardId = this.request.shardId().id(); - this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); + this.logger = ServerLoggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1bbcb9efa9644..f4c823c0e96a7 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -34,7 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; @@ -117,7 +117,7 @@ public RecoveryTarget(final IndexShard indexShard, this.cancellableThreads = new CancellableThreads(); this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 02d6c205831fa..b76e63959105f 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; @@ -143,7 +144,6 @@ import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; -import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.Charset; @@ -267,7 +267,7 @@ protected Node(final Environment environment, Collection throw new IllegalStateException("Failed to create node environment", ex); } final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - Logger logger = Loggers.getLogger(Node.class, tmpSettings); + Logger logger = ServerLoggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); // this must be captured after the node name is possibly added to the settings diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 2b94a464a592a..7f0e67a2a95a9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -259,8 +259,8 @@ Aggregator create(String name, final long maxOrd = getMaxOrd(valuesSource, context.searcher()); assert maxOrd != -1; - final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); - + final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); + if (factories == AggregatorFactories.EMPTY && includeExclude == null && Aggregator.descendsFromBucketAggregator(parent) == false && diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index c5b99a91ffa3b..2c51c210b1edc 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -83,11 +83,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "I/O exception while trying to read [{}]", new Object[] { procSysVmMaxMapCountPath }, e -> ioException == e)); - Loggers.addAppender(logger, appender); + ServerLoggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - Loggers.removeAppender(logger, appender); + ServerLoggers.removeAppender(logger, appender); appender.stop(); } @@ -105,11 +105,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "unable to parse vm.max_map_count [{}]", new Object[] { "eof" }, e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\""))); - Loggers.addAppender(logger, appender); + ServerLoggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - Loggers.removeAppender(logger, appender); + ServerLoggers.removeAppender(logger, appender); appender.stop(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 0522f3f15f817..b8050d728a6b3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -342,7 +343,7 @@ public 
void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no completed message logged on dry run", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*") ); - Loggers.addAppender(actionLogger, dryRunMockLog); + ServerLoggers.addAppender(actionLogger, dryRunMockLog); AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); ClusterRerouteResponse dryRunResponse = client().admin().cluster().prepareReroute() @@ -357,7 +358,7 @@ public void testMessageLogging() throws Exception{ dryRunMockLog.assertAllExpectationsMatched(); dryRunMockLog.stop(); - Loggers.removeAppender(actionLogger, dryRunMockLog); + ServerLoggers.removeAppender(actionLogger, dryRunMockLog); MockLogAppender allocateMockLog = new MockLogAppender(); allocateMockLog.start(); @@ -369,7 +370,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no message for second allocate empty primary", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*" + nodeName2 + "*") ); - Loggers.addAppender(actionLogger, allocateMockLog); + ServerLoggers.addAppender(actionLogger, allocateMockLog); AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); @@ -385,7 +386,7 @@ public void testMessageLogging() throws Exception{ allocateMockLog.assertAllExpectationsMatched(); allocateMockLog.stop(); - Loggers.removeAppender(actionLogger, allocateMockLog); + ServerLoggers.removeAppender(actionLogger, allocateMockLog); } public void testClusterRerouteWithBlocks() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index c8d5cdc6c86db..be03fbe1cd640 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -63,7 +63,7 @@ public static class TestPlugin extends Plugin { protected final Settings settings; public TestPlugin(Settings settings) { - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.settings = settings; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 1ed5a3ac7ed90..8ebe627751ce4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import 
org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b551e912947a..4b941a6ce4a7f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; @@ -41,24 +40,19 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.indices.cluster.AbstractIndicesClusterStateServiceTestCase; import org.elasticsearch.indices.cluster.ClusterStateChanges; -import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index f6ab967a10b46..1406e4d6d6121 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 34750180ff185..c104df913b205 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.ClusterStateObserver; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -130,7 +130,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*failed to execute cluster state applier in [2s]*")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(3); clusterApplierService.currentTimeOverride = System.nanoTime(); @@ -180,7 +180,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -210,7 +210,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state applier task [test3] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -276,7 +276,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 1b747f2268747..3b999b5f7733a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -231,7 +232,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); masterService.currentTimeOverride = System.nanoTime(); @@ -306,7 +307,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -578,7 +579,7 @@ public void testLongClusterStateUpdateLogging() throws 
Exception { "*cluster state update task [test4] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(5); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -674,7 +675,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2015a6b42d16f..29c7a2b161403 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; @@ -751,8 +751,8 @@ public void testLoggingUpdates() { settings.applySettings(Settings.builder().build()); assertEquals(property, ESLoggerFactory.getLogger("test").getLevel()); } finally { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); - Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); } } @@ -767,7 +767,7 @@ public void testFallbackToLoggerLevel() { settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default. 
assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel()); } finally { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 880f8dcba5de5..aeadcf30e3678 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -52,8 +51,6 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.List; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; diff --git a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java index e9eb5d8b83d2e..301d4e3cfa360 100644 --- a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -71,8 +72,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - Loggers.addAppender(settingsLogger, mockAppender); - Loggers.setLevel(settingsLogger, Level.TRACE); + ServerLoggers.addAppender(settingsLogger, mockAppender); + ServerLoggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -91,9 +92,9 @@ public void testUpdateAutoThrottleSettings() throws Exception { assertTrue(mockAppender.sawUpdateAutoThrottle); assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), false); } finally { - Loggers.removeAppender(settingsLogger, mockAppender); + ServerLoggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(settingsLogger, (Level) null); + ServerLoggers.setLevel(settingsLogger, (Level) null); } } @@ -102,8 +103,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - Loggers.addAppender(settingsLogger, mockAppender); - Loggers.setLevel(settingsLogger, Level.TRACE); + ServerLoggers.addAppender(settingsLogger, mockAppender); + 
ServerLoggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -123,9 +124,9 @@ public void testUpdateMergeMaxThreadCount() throws Exception { // Make sure we log the change: assertTrue(mockAppender.sawUpdateMaxThreadCount); } finally { - Loggers.removeAppender(settingsLogger, mockAppender); + ServerLoggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(settingsLogger, (Level) null); + ServerLoggers.setLevel(settingsLogger, (Level) null); } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index e1aa09b9b6542..cfbc404d3c778 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -78,6 +78,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -1942,8 +1943,8 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - Loggers.addAppender(rootLogger, mockAppender); - Loggers.setLevel(rootLogger, Level.DEBUG); + ServerLoggers.addAppender(rootLogger, mockAppender); + ServerLoggers.setLevel(rootLogger, Level.DEBUG); rootLogger = LogManager.getRootLogger(); try { @@ -1954,15 +1955,15 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti assertFalse(mockAppender.sawIndexWriterMessage); // Again, with TRACE, which should log IndexWriter output: - Loggers.setLevel(rootLogger, Level.TRACE); + ServerLoggers.setLevel(rootLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertTrue(mockAppender.sawIndexWriterMessage); } finally { - Loggers.removeAppender(rootLogger, mockAppender); + ServerLoggers.removeAppender(rootLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(rootLogger, savedLevel); + ServerLoggers.setLevel(rootLogger, savedLevel); } } @@ -2232,8 +2233,8 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD"); - Loggers.addAppender(iwIFDLogger, mockAppender); - Loggers.setLevel(iwIFDLogger, Level.DEBUG); + ServerLoggers.addAppender(iwIFDLogger, mockAppender); + ServerLoggers.setLevel(iwIFDLogger, Level.DEBUG); try { // First, with DEBUG, which should NOT log IndexWriter output: @@ -2244,16 +2245,16 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce assertFalse(mockAppender.sawIndexWriterIFDMessage); // Again, with TRACE, which should only log IndexWriter IFD output: - Loggers.setLevel(iwIFDLogger, Level.TRACE); + ServerLoggers.setLevel(iwIFDLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertTrue(mockAppender.sawIndexWriterIFDMessage); } finally { - Loggers.removeAppender(iwIFDLogger, mockAppender); + ServerLoggers.removeAppender(iwIFDLogger, mockAppender); mockAppender.stop(); - 
Loggers.setLevel(iwIFDLogger, (Level) null); + ServerLoggers.setLevel(iwIFDLogger, (Level) null); } } diff --git a/settings.gradle b/settings.gradle index be1e6c7c45d64..b811a46ea42c7 100644 --- a/settings.gradle +++ b/settings.gradle @@ -28,6 +28,7 @@ List projects = [ 'test:fixtures:krb5kdc-fixture', 'test:fixtures:old-elasticsearch', 'test:logger-usage', + 'libs:elasticsearch-core', 'modules:aggs-matrix-stats', 'modules:analysis-common', 'modules:ingest-common', diff --git a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java index 69dfae2c6788c..c078e88da20ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java @@ -27,7 +27,6 @@ import org.apache.lucene.util.TimeUnits; import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 470847e65f25f..a11b70bfa104e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -35,7 +35,6 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.List; import java.util.Random; import java.util.Set; diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index 144b2be1b0235..f30c498b21020 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.SearcherManager; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.Loggers; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index e021df52c60fe..60cc6ceeccfa7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.runner.Description; @@ -106,7 +107,7 @@ private Map processTestLogging(final TestLogging testLogging) { } for (final Map.Entry entry : map.entrySet()) { final Logger logger = resolveLogger(entry.getKey()); - Loggers.setLevel(logger, entry.getValue()); + ServerLoggers.setLevel(logger, entry.getValue()); } return existing; } @@ -145,7 +146,7 @@ private static Map 
getLoggersAndLevelsFromAnnotation(final TestL private Map<String, Level> reset(final Map<String, Level> map) { for (final Map.Entry<String, Level> previousLogger : map.entrySet()) { final Logger logger = resolveLogger(previousLogger.getKey()); - Loggers.setLevel(logger, previousLogger.getValue()); + ServerLoggers.setLevel(logger, previousLogger.getValue()); } return Collections.emptyMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java index e2eefc6376ad1..c7b8e0fef2f9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1efd210b110c8..858a8ebd5ed0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -95,7 +95,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } From b93996fe1401615a16ddd764a528933468e3d40f Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:00:20 +0100 Subject: [PATCH 16/31] Fix synonym phrase query expansion for cross_fields parsing (#28045) * Fix synonym phrase query expansion for cross_fields parsing The `cross_fields` mode of the query parser ignores phrase queries generated by multi-word synonyms; in that case only the first field of each analyzer group is kept. This change fixes the issue by expanding the phrase query for each analyzer group to **all** fields using a disjunction max query.
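For illustration, a minimal standalone sketch of this expansion using plain Lucene APIs (the class and field names are invented for the example and are not part of the patch):

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;

public class CrossFieldsPhraseSketch {
    // Rebuild the phrase on every field of the analyzer group and combine the
    // per-field phrases with a dismax; a tie breaker of 0 keeps pure max semantics.
    static Query expandPhrase(PhraseQuery phrase, String... groupFields) {
        Term[] terms = phrase.getTerms();
        int[] positions = phrase.getPositions();
        List<Query> disjuncts = new ArrayList<>();
        for (String field : groupFields) {
            PhraseQuery.Builder builder = new PhraseQuery.Builder();
            for (int i = 0; i < terms.length; i++) {
                builder.add(new Term(field, terms[i].bytes()), positions[i]);
            }
            disjuncts.add(builder.build());
        }
        return new DisjunctionMaxQuery(disjuncts, 0.0f);
    }

    public static void main(String[] args) {
        // "guinea pig" was analyzed against one field; expand it to the whole group.
        PhraseQuery phrase = new PhraseQuery("name.first", "guinea", "pig");
        System.out.println(expandPhrase(phrase, "name.first", "name.last"));
        // prints something like: (name.first:"guinea pig" | name.last:"guinea pig")
    }
}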
--- .../index/search/MatchQuery.java | 17 ++++++- .../index/search/MultiMatchQuery.java | 47 +++++++++++++++++- .../index/search/MultiMatchQueryTests.java | 49 +++++++++++++++++++ 3 files changed, 110 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 380b6251920e8..d1a7fd91819a2 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.MultiTermQuery; @@ -351,7 +352,12 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws throw exc; } } - return super.analyzePhrase(field, stream, slop); + Query query = super.analyzePhrase(field, stream, slop); + if (query instanceof PhraseQuery) { + // synonyms that expand to multiple terms can return a phrase query. + return blendPhraseQuery((PhraseQuery) query, mapper); + } + return query; } /** @@ -476,6 +482,14 @@ private Query boolToExtendedCommonTermsQuery(BooleanQuery bq, Occur highFreqOccu } } + /** + * Called when a phrase query is built with {@link QueryBuilder#analyzePhrase(String, TokenStream, int)}. + * Subclass can override this function to blend this query to multiple fields. + */ + protected Query blendPhraseQuery(PhraseQuery query, MappedFieldType fieldType) { + return query; + } + protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { return new SynonymQuery(terms); } @@ -498,5 +512,4 @@ protected Query blendTermQuery(Term term, MappedFieldType fieldType) { } return termQuery(fieldType, term.bytes(), lenient); } - } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index c67cceec0d182..9d76b35c21ed6 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -25,10 +25,10 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -141,6 +141,10 @@ public Query blendTerms(Term[] terms, MappedFieldType fieldType) { public Query termQuery(MappedFieldType fieldType, BytesRef value) { return MultiMatchQuery.this.termQuery(fieldType, value, lenient); } + + public Query blendPhrase(PhraseQuery query, MappedFieldType type) { + return MultiMatchQuery.super.blendPhraseQuery(query, type); + } } final class CrossFieldsQueryBuilder extends QueryBuilder { @@ -224,6 +228,17 @@ public Query termQuery(MappedFieldType fieldType, BytesRef value) { */ return blendTerm(new Term(fieldType.name(), value.utf8ToString()), fieldType); } + + @Override + public 
Query blendPhrase(PhraseQuery query, MappedFieldType type) { + if (blendedFields == null) { + return super.blendPhrase(query, type); + } + /** + * We build phrase queries for multi-word synonyms when {@link QueryBuilder#autoGenerateSynonymsPhraseQuery} is true. + */ + return MultiMatchQuery.blendPhrase(query, blendedFields); + } } static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker, @@ -293,6 +308,28 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm } } + /** + * Expand a {@link PhraseQuery} to multiple fields that share the same analyzer. + * Returns a {@link DisjunctionMaxQuery} with a disjunction for each expanded field. + */ + static Query blendPhrase(PhraseQuery query, FieldAndFieldType... fields) { + List disjunctions = new ArrayList<>(); + for (FieldAndFieldType field : fields) { + int[] positions = query.getPositions(); + Term[] terms = query.getTerms(); + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + for (int i = 0; i < terms.length; i++) { + builder.add(new Term(field.fieldType.name(), terms[i].bytes()), positions[i]); + } + Query q = builder.build(); + if (field.boost != AbstractQueryBuilder.DEFAULT_BOOST) { + q = new BoostQuery(q, field.boost); + } + disjunctions.add(q); + } + return new DisjunctionMaxQuery(disjunctions, 0.0f); + } + @Override protected Query blendTermQuery(Term term, MappedFieldType fieldType) { if (queryBuilder == null) { @@ -309,6 +346,14 @@ protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { return queryBuilder.blendTerms(terms, fieldType); } + @Override + protected Query blendPhraseQuery(PhraseQuery query, MappedFieldType fieldType) { + if (queryBuilder == null) { + return super.blendPhraseQuery(query, fieldType); + } + return queryBuilder.blendPhrase(query, fieldType); + } + static final class FieldAndFieldType { final MappedFieldType fieldType; final float boost; diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index d18e4307d36de..0f6429f7f30b3 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -19,12 +19,16 @@ package org.elasticsearch.index.search; +import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -43,7 +47,11 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.hamcrest.Matchers.equalTo; @@ -215,4 +223,45 @@ public void testMultiMatchCrossFieldsWithSynonyms() throws IOException { assertThat(parsedQuery, equalTo(expectedQuery)); } + + public void testMultiMatchCrossFieldsWithSynonymsPhrase() throws IOException 
{ + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null); + MultiMatchQuery parser = new MultiMatchQuery(queryShardContext); + parser.setAnalyzer(new MockSynonymAnalyzer()); + Map<String, Float> fieldNames = new HashMap<>(); + fieldNames.put("name.first", 1.0f); + fieldNames.put("name.last", 1.0f); + Query query = parser.parse(MultiMatchQueryBuilder.Type.CROSS_FIELDS, fieldNames, "guinea pig", null); + + Term[] terms = new Term[2]; + terms[0] = new Term("name.first", "cavy"); + terms[1] = new Term("name.last", "cavy"); + float[] boosts = new float[2]; + Arrays.fill(boosts, 1.0f); + + List<Query> phraseDisjuncts = new ArrayList<>(); + phraseDisjuncts.add( + new PhraseQuery.Builder() + .add(new Term("name.first", "guinea")) + .add(new Term("name.first", "pig")) + .build() + ); + phraseDisjuncts.add( + new PhraseQuery.Builder() + .add(new Term("name.last", "guinea")) + .add(new Term("name.last", "pig")) + .build() + ); + BooleanQuery expected = new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery(phraseDisjuncts, 0.0f), BooleanClause.Occur.SHOULD) + .add(BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD + ) + .build(); + assertEquals(expected, query); + } } From 24c19c889eb13a66bb4ae5fbd027a359b6a6a1b8 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:27:30 +0100 Subject: [PATCH 17/31] #28045 restore removed import after backport --- .../java/org/elasticsearch/index/search/MultiMatchQuery.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 9d76b35c21ed6..c559aa9458b5b 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; From 7e28573a6674818fc06c0da989a9d497dc18cfd3 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:30:38 +0100 Subject: [PATCH 18/31] Fix NPE on composite aggregation with sub-aggregations that need scores (#28129) The composite aggregation defers the collection of sub-aggregations to a second pass that visits documents only if they appear in the top buckets. However, the scorer for sub-aggregations is not set on this second pass, so an NPE is thrown if any sub-aggregation tries to access the score. This change creates a scorer for the second pass and makes sure that sub-aggs can use it safely to check the score of the collected documents.
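For illustration, a minimal standalone sketch of the replay loop this change introduces, written against the Lucene 7 era API (the helper and parameter names are invented for the example): build a scoring Weight for the top-level query, pull a Scorer per segment, and advance it in lock step with the saved doc-id iterator so the sub-collector can read scores.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

final class ReplayWithScoresSketch {
    static void replay(IndexSearcher searcher, Query query, LeafReaderContext ctx,
                       DocIdSetIterator savedDocs, LeafCollector subCollector) throws IOException {
        // Scores are needed on the second pass, so ask for a scoring Weight.
        Weight weight = searcher.createNormalizedWeight(query, true);
        // savedDocs is non-empty for this segment, so the scorer cannot be null.
        Scorer scorer = weight.scorer(ctx);
        DocIdSetIterator scorerIt = scorer.iterator();
        subCollector.setScorer(scorer);
        for (int doc = savedDocs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = savedDocs.nextDoc()) {
            // Every saved doc matched the query on the first pass, so advance() lands exactly on it.
            scorerIt.advance(doc);
            subCollector.collect(doc);
        }
    }
}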
--- .../bucket/composite/CompositeAggregator.java | 23 ++++++ .../composite/CompositeAggregatorTests.java | 73 ++++++++++++++++++- .../aggregations/AggregatorTestCase.java | 39 +++++++--- 3 files changed, 123 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 9612ba2f895bc..3467aaf318baf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -23,6 +23,9 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -87,6 +90,12 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException // Replay all documents that contain at least one top bucket (collected during the first pass). grow(keys.size()+1); + final boolean needsScores = needsScores(); + Weight weight = null; + if (needsScores) { + Query query = context.query(); + weight = context.searcher().createNormalizedWeight(query, true); + } for (LeafContext context : contexts) { DocIdSetIterator docIdSetIterator = context.docIdSet.iterator(); if (docIdSetIterator == null) { @@ -95,7 +104,21 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException final CompositeValuesSource.Collector collector = array.getLeafCollector(context.ctx, getSecondPassCollector(context.subCollector)); int docID; + DocIdSetIterator scorerIt = null; + if (needsScores) { + Scorer scorer = weight.scorer(context.ctx); + // We don't need to check if the scorer is null + // since we are sure that there are documents to replay (docIdSetIterator is not empty).
+ scorerIt = scorer.iterator(); + context.subCollector.setScorer(scorer); + } while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (needsScores) { + assert scorerIt.docID() < docID; + scorerIt.advance(docID); + // aggregations should only be replayed on matching documents + assert scorerIt.docID() == docID; + } collector.collect(docID); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 339f9bda65a0a..172aebbc0e5dc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -50,6 +50,8 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.IndexSettingsModule; import org.joda.time.DateTimeZone; @@ -1065,8 +1067,73 @@ public void testWithKeywordAndDateHistogram() throws IOException { ); } - private void testSearchCase(Query query, - Sort sort, + public void testWithKeywordAndTopHits() throws Exception { + final List>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("keyword", "a"), + createDocument("keyword", "c"), + createDocument("keyword", "a"), + createDocument("keyword", "d"), + createDocument("keyword", "c") + ) + ); + final Sort sort = new Sort(new SortedSetSortField("keyword", false)); + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")); + }, (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + TopHits topHits = result.getBuckets().get(0).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + topHits = result.getBuckets().get(1).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + topHits = result.getBuckets().get(2).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 1); + assertEquals(topHits.getHits().getTotalHits(), 1L);; + } + ); + + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + 
return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .aggregateAfter(Collections.singletonMap("keyword", "a")) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")); + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=c}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + TopHits topHits = result.getBuckets().get(0).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=d}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + topHits = result.getBuckets().get(1).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 1); + assertEquals(topHits.getHits().getTotalHits(), 1L); + } + ); + } + + private void testSearchCase(Query query, Sort sort, List>> dataset, Supplier create, Consumer verify) throws IOException { @@ -1107,7 +1174,7 @@ private void executeTestCase(boolean reduced, IndexSearcher indexSearcher = newSearcher(indexReader, sort == null, sort == null); CompositeAggregationBuilder aggregationBuilder = create.get(); if (sort != null) { - CompositeAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); + CompositeAggregator aggregator = createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); assertTrue(aggregator.canEarlyTerminate()); } final InternalComposite composite; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 137865beb1540..05ca2d40f82ca 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -103,12 +103,22 @@ protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggreg new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - /** Create a factory for the given aggregation builder. */ + protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + return createAggregatorFactory(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); + } + + /** Create a factory for the given aggregation builder. */ + protected AggregatorFactory createAggregatorFactory(Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + MappedFieldType... 
fieldTypes) throws IOException { SearchContext searchContext = createSearchContext(indexSearcher, indexSettings); CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); when(searchContext.aggregations()) @@ -116,6 +126,7 @@ protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggreg when(searchContext.bigArrays()).thenReturn( new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService) ); + when(searchContext.query()).thenReturn(query); // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests: MapperService mapperService = mapperServiceMock(); when(mapperService.getIndexSettings()).thenReturn(indexSettings); @@ -148,19 +159,20 @@ protected A createAggregator(AggregationBuilder aggregati new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - protected A createAggregator(AggregationBuilder aggregationBuilder, + protected A createAggregator(Query query, + AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, IndexSettings indexSettings, MappedFieldType... fieldTypes) throws IOException { - return createAggregator(aggregationBuilder, indexSearcher, indexSettings, + return createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - protected A createAggregator(AggregationBuilder aggregationBuilder, + protected A createAggregator(Query query, AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { - return createAggregator(aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes); + return createAggregator(query, aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes); } protected A createAggregator(AggregationBuilder aggregationBuilder, @@ -168,8 +180,17 @@ protected A createAggregator(AggregationBuilder aggregati IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + return createAggregator(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); + } + + protected A createAggregator(Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + MappedFieldType... fieldTypes) throws IOException { @SuppressWarnings("unchecked") - A aggregator = (A) createAggregatorFactory(aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) + A aggregator = (A) createAggregatorFactory(query, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) .create(null, true); return aggregator; } @@ -264,7 +285,7 @@ protected A search(IndexSe int maxBucket, MappedFieldType... 
fieldTypes) throws IOException { MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); - C a = createAggregator(builder, searcher, bucketConsumer, fieldTypes); + C a = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); a.preCollection(); searcher.search(query, a); a.postCollection(); @@ -312,11 +333,11 @@ protected A searchAndReduc Query rewritten = searcher.rewrite(query); Weight weight = searcher.createWeight(rewritten, true, 1f); MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); - C root = createAggregator(builder, searcher, bucketConsumer, fieldTypes); + C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); for (ShardSearcher subSearcher : subSearchers) { MultiBucketConsumer shardBucketConsumer = new MultiBucketConsumer(maxBucket); - C a = createAggregator(builder, subSearcher, shardBucketConsumer, fieldTypes); + C a = createAggregator(query, builder, subSearcher, shardBucketConsumer, fieldTypes); a.preCollection(); subSearcher.search(weight, a); a.postCollection(); From 720e2dd2863e0b022f4bac6f500577d93e76d564 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 19:35:54 +0100 Subject: [PATCH 19/31] Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter (#28225) This commit changes the phonetic filter factory to use a DaitchMokotoffSoundexFilter instead of a PhoneticFilter with a daitch_mokotoff encoder when daitch_mokotoff is selected. The latter does not handle branching when computing the soundex and fails to encode multiple variations when possible. Closes #28211 --- .../index/analysis/PhoneticTokenFilterFactory.java | 9 ++++++++- .../index/analysis/SimplePhoneticAnalysisTests.java | 11 +++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index 86330f5d9ba2c..36b4ab9ca58f4 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -33,6 +33,7 @@ import org.apache.commons.codec.language.bm.RuleType; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.BeiderMorseFilter; +import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.phonetic.PhoneticFilter; import org.elasticsearch.common.settings.Settings; @@ -53,6 +54,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private List<String> languageset; private NameType nametype; private RuleType ruletype; + private boolean isDaitchMokotoff; public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); @@ -61,6 +63,7 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir this.ruletype = null; this.maxcodelength = 0; this.replace = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "replace", true, deprecationLogger); + this.isDaitchMokotoff = false; // weird, encoder is null at last step in SimplePhoneticAnalysisTests, so we set it to metaphone as default String encodername =
settings.get("encoder", "metaphone"); if ("metaphone".equalsIgnoreCase(encodername)) { @@ -106,7 +109,8 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir } else if ("nysiis".equalsIgnoreCase(encodername)) { this.encoder = new Nysiis(); } else if ("daitch_mokotoff".equalsIgnoreCase(encodername)) { - this.encoder = new DaitchMokotoffSoundex(); + this.encoder = null; + this.isDaitchMokotoff = true; } else { throw new IllegalArgumentException("unknown encoder [" + encodername + "] for phonetic token filter"); } @@ -115,6 +119,9 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir @Override public TokenStream create(TokenStream tokenStream) { if (encoder == null) { + if (isDaitchMokotoff) { + return new DaitchMokotoffSoundexFilter(tokenStream, !replace); + } if (ruletype != null && nametype != null) { LanguageSet langset = null; if (languageset != null && languageset.size() > 0) { diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java index e3877faee3146..7fad525b33c3e 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -72,4 +73,14 @@ public void testPhoneticTokenFilterBeiderMorseWithLanguage() throws IOException "rmba", "rmbalt", "rmbo", "rmbolt", "rmbu", "rmbult" }; BaseTokenStreamTestCase.assertTokenStreamContents(filterFactory.create(tokenizer), expected); } + + public void testPhoneticTokenFilterDaitchMotokoff() throws IOException { + TokenFilterFactory filterFactory = analysis.tokenFilter.get("daitch_mokotoff"); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader("chauptman")); + String[] expected = new String[] { "473660", "573660" }; + assertThat(filterFactory.create(tokenizer), instanceOf(DaitchMokotoffSoundexFilter.class)); + BaseTokenStreamTestCase.assertTokenStreamContents(filterFactory.create(tokenizer), expected); + } + } From f84a8afc1a1ff9a1e0287257232281c3006badce Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 15 Jan 2018 11:28:31 -0800 Subject: [PATCH 20/31] Painless: Add whitelist extensions (#28161) This commit adds a PainlessExtension which may be plugged in via SPI to add additional classes, methods and members to the painless whitelist on a per context basis. An example plugin adding and using a whitelist is also added. 
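For illustration, a hypothetical extension a plugin could ship against this SPI (the class name, the whitelist resource, and the choice of the search-script context are invented for the example; the extension is discovered by listing the class in a META-INF/services/org.elasticsearch.painless.spi.PainlessExtension file):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.elasticsearch.painless.spi.PainlessExtension;
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;

public class MyWhitelistExtension implements PainlessExtension {
    // my_whitelist.txt is a classpath resource next to this class, in the same
    // format as the built-in java.*.txt whitelist files.
    private static final Whitelist WHITELIST =
            WhitelistLoader.loadFromResourceFiles(MyWhitelistExtension.class, "my_whitelist.txt");

    @Override
    public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
        // Extend only the search-script context; other contexts keep the base whitelists.
        return Collections.singletonMap(SearchScript.CONTEXT, Collections.singletonList(WHITELIST));
    }
}

At plugin load time, reloadSPI merges such entries on top of Whitelist.BASE_WHITELISTS for the matching contexts.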
--- .../org/elasticsearch/painless/Compiler.java | 1 + .../elasticsearch/painless/Definition.java | 24 +------- .../painless/PainlessPlugin.java | 30 +++++++++- .../painless/PainlessScriptEngine.java | 19 +++---- .../painless/spi/PainlessExtension.java | 30 ++++++++++ .../painless/{ => spi}/Whitelist.java | 22 ++++++- .../painless/{ => spi}/WhitelistLoader.java | 7 ++- .../plugin-metadata/plugin-security.policy | 3 + .../painless/{ => spi}/java.lang.txt | 0 .../painless/{ => spi}/java.math.txt | 0 .../painless/{ => spi}/java.text.txt | 0 .../painless/{ => spi}/java.time.chrono.txt | 0 .../painless/{ => spi}/java.time.format.txt | 0 .../painless/{ => spi}/java.time.temporal.txt | 0 .../painless/{ => spi}/java.time.txt | 0 .../painless/{ => spi}/java.time.zone.txt | 0 .../painless/{ => spi}/java.util.function.txt | 0 .../painless/{ => spi}/java.util.regex.txt | 0 .../painless/{ => spi}/java.util.stream.txt | 0 .../painless/{ => spi}/java.util.txt | 0 .../painless/{ => spi}/joda.time.txt | 0 .../painless/{ => spi}/org.elasticsearch.txt | 0 .../painless/AnalyzerCasterTests.java | 8 +-- .../painless/BaseClassTests.java | 8 +-- .../elasticsearch/painless/DebugTests.java | 5 +- .../org/elasticsearch/painless/Debugger.java | 5 +- .../painless/DefBootstrapTests.java | 4 +- .../elasticsearch/painless/FactoryTests.java | 16 +++--- .../painless/NeedsScoreTests.java | 13 +++-- .../painless/PainlessDocGenerator.java | 6 +- .../painless/ScriptTestCase.java | 17 +++--- .../painless/SimilarityScriptTests.java | 13 +++-- .../painless/node/NodeToStringTests.java | 6 +- .../examples/painless-whitelist/build.gradle | 4 ++ .../ExampleWhitelistExtension.java | 42 ++++++++++++++ .../ExampleWhitelistedClass.java | 57 +++++++++++++++++++ .../painlesswhitelist/MyWhitelistPlugin.java | 1 + ...asticsearch.painless.spi.PainlessExtension | 1 + .../painlesswhitelist/example_whitelist.txt | 42 ++++++++++++++ .../test/painless_whitelist/20_whitelist.yml | 26 +++++++++ 40 files changed, 319 insertions(+), 91 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java rename modules/lang-painless/src/main/java/org/elasticsearch/painless/{ => spi}/Whitelist.java (93%) rename modules/lang-painless/src/main/java/org/elasticsearch/painless/{ => spi}/WhitelistLoader.java (98%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.lang.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.math.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.text.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.chrono.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.format.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.temporal.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.zone.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.function.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.regex.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.stream.txt (100%) 
rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/joda.time.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/org.elasticsearch.txt (100%) create mode 100644 plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java create mode 100644 plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java create mode 100644 plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension create mode 100644 plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt create mode 100644 plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index ad5e80ba16edd..8102016828c30 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -22,6 +22,7 @@ import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.node.SSource; +import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Printer; import java.lang.reflect.Constructor; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 7d8b4ff4e614e..7729c5319ea81 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.painless.spi.Whitelist; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -46,29 +47,6 @@ public final class Definition { private static final Pattern TYPE_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); - public static final String[] DEFINITION_FILES = new String[] { - "org.elasticsearch.txt", - "java.lang.txt", - "java.math.txt", - "java.text.txt", - "java.time.txt", - "java.time.chrono.txt", - "java.time.format.txt", - "java.time.temporal.txt", - "java.time.zone.txt", - "java.util.txt", - "java.util.function.txt", - "java.util.regex.txt", - "java.util.stream.txt", - "joda.time.txt" - }; - - /** - * Whitelist that is "built in" to Painless and required by all scripts. 
- */ - public static final Definition DEFINITION = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); - /** Some native types as constants: */ public final Type voidType; public final Type booleanType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 842af8717a34b..795d81bb6e058 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -22,28 +22,56 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; /** * Registers Painless as a plugin. */ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin { + private final Map, List> extendedWhitelists = new HashMap<>(); + @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new PainlessScriptEngine(settings, contexts); + Map, List> contextsWithWhitelists = new HashMap<>(); + for (ScriptContext context : contexts) { + // we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI + List whitelists = extendedWhitelists.get(context); + if (whitelists == null) { + whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + } + contextsWithWhitelists.put(context, whitelists); + } + return new PainlessScriptEngine(settings, contextsWithWhitelists); } @Override public List> getSettings() { return Arrays.asList(CompilerSettings.REGEX_ENABLED); } + + @Override + public void reloadSPI(ClassLoader loader) { + for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) { + for (Map.Entry, List> entry : extension.getContextWhitelists().entrySet()) { + List existing = extendedWhitelists.computeIfAbsent(entry.getKey(), + c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); + existing.addAll(entry.getValue()); + } + } + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index ac01f45a7fdd6..95a38bf22c653 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -19,12 +19,12 @@ package org.elasticsearch.painless; -import org.apache.logging.log4j.core.tools.Generate; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.Compiler.Loader; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; import 
org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; @@ -45,7 +45,6 @@ import java.security.ProtectionDomain; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -82,7 +81,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr /** * Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside - * of {@link PainlessScriptEngine#PainlessScriptEngine(Settings, Collection)}. + * of {@link PainlessScriptEngine#PainlessScriptEngine(Settings, Map)}. */ private final CompilerSettings defaultCompilerSettings = new CompilerSettings(); @@ -92,23 +91,19 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr * Constructor. * @param settings The settings to initialize the engine with. */ - public PainlessScriptEngine(Settings settings, Collection> contexts) { + public PainlessScriptEngine(Settings settings, Map, List> contexts) { super(settings); defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings)); Map, Compiler> contextsToCompilers = new HashMap<>(); - // Placeholder definition used for all contexts until SPI is fully integrated. Reduces memory foot print - // by re-using the same definition since caching isn't implemented at this time. - Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); - - for (ScriptContext context : contexts) { + for (Map.Entry, List> entry : contexts.entrySet()) { + ScriptContext context = entry.getKey(); if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) { - contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, definition)); + contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, new Definition(entry.getValue()))); } else { - contextsToCompilers.put(context, new Compiler(context.instanceClazz, definition)); + contextsToCompilers.put(context, new Compiler(context.instanceClazz, new Definition(entry.getValue()))); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java new file mode 100644 index 0000000000000..9434e6986c0a3 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Map; + +import org.elasticsearch.script.ScriptContext; + +public interface PainlessExtension { + + Map, List> getContextWhitelists(); +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java similarity index 93% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 678b8a4c1ae38..e715eb0090c7f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import java.util.Collections; import java.util.List; @@ -34,6 +34,26 @@ */ public final class Whitelist { + private static final String[] BASE_WHITELIST_FILES = new String[] { + "org.elasticsearch.txt", + "java.lang.txt", + "java.math.txt", + "java.text.txt", + "java.time.txt", + "java.time.chrono.txt", + "java.time.format.txt", + "java.time.temporal.txt", + "java.time.zone.txt", + "java.util.txt", + "java.util.function.txt", + "java.util.regex.txt", + "java.util.stream.txt", + "joda.time.txt" + }; + + public static final List BASE_WHITELISTS = + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Whitelist.class, BASE_WHITELIST_FILES)); + /** * Struct represents the equivalent of a Java class in Painless complete with super classes, * constructors, methods, and fields. In Painless a class is known as a struct primarily to avoid diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java similarity index 98% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 93ea951f453aa..8817bfa274c60 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import java.io.InputStreamReader; import java.io.LineNumberReader; @@ -25,6 +25,8 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -296,8 +298,9 @@ public static Whitelist loadFromResourceFiles(Class resource, String... 
filep throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction)resource::getClassLoader); - return new Whitelist(resource.getClassLoader(), whitelistStructs); + return new Whitelist(loader, whitelistStructs); } private WhitelistLoader() {} diff --git a/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy b/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy index e45c1b86ceb2c..b383c6da3f12c 100644 --- a/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy +++ b/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy @@ -20,4 +20,7 @@ grant { // needed to generate runtime classes permission java.lang.RuntimePermission "createClassLoader"; + + // needed to find the classloader to load whitelisted classes from + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt 
b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index 58ae31a45c93a..919b0881c0794 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -21,16 +21,12 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; -import java.util.Collections; - -import static org.elasticsearch.painless.Definition.DEFINITION_FILES; - public class AnalyzerCasterTests extends ESTestCase { - private static final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); + private static final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); private static void assertCast(Type actual, Type expected, boolean mustBeExplicit) { Location location = new Location("dummy", 0); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index 2ba8692b8af59..59cafa96ddcb9 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -19,13 +19,12 @@ package org.elasticsearch.painless; -import org.elasticsearch.script.ScriptContext; - -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.elasticsearch.painless.spi.Whitelist; + import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; @@ -37,8 +36,7 @@ */ public class BaseClassTests extends ScriptTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public abstract static class Gets { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index a55b48f0189b3..279438e74a7c3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -22,10 +22,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptException; import java.io.IOException; -import java.util.Collections; import java.util.Map; import static java.util.Collections.singletonList; @@ -35,8 +35,7 @@ import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public void testExplain() { // Debug.explain can explain an object diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 52ec783db4ef4..e29986a3c87de 100644 --- 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -20,11 +20,11 @@ package org.elasticsearch.painless; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Textifier; import java.io.PrintWriter; import java.io.StringWriter; -import java.util.Collections; /** quick and dirty tools for debugging */ final class Debugger { @@ -40,8 +40,7 @@ static String toString(Class iface, String source, CompilerSettings settings) PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES)))) + new Compiler(iface, new Definition(Whitelist.BASE_WHITELISTS)) .compile("", source, settings, textifier); } catch (Exception e) { textifier.print(outputWriter); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index 8d9338ae42165..2b6af6982ead0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -27,11 +27,11 @@ import java.util.Collections; import java.util.HashMap; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; public class DefBootstrapTests extends ESTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); /** calls toString() on integers, twice */ public void testOneType() throws Throwable { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java index b15a2747bd088..556ef8dd3c6d3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java @@ -19,21 +19,23 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.TemplateScript; -import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; public class FactoryTests extends ScriptTestCase { - protected Collection> scriptContexts() { - Collection> contexts = super.scriptContexts(); - contexts.add(StatefulFactoryTestScript.CONTEXT); - contexts.add(FactoryTestScript.CONTEXT); - contexts.add(EmptyTestScript.CONTEXT); - contexts.add(TemplateScript.CONTEXT); + @Override + protected Map, List> scriptContexts() { + Map, List> contexts = super.scriptContexts(); + contexts.put(StatefulFactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(FactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(EmptyTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(TemplateScript.CONTEXT, Whitelist.BASE_WHITELISTS); return contexts; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java index db254b734a81a..50a377b881878 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java @@ -22,14 +22,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * Test that needsScores() is reported correctly depending on whether _score is used @@ -40,8 +43,10 @@ public class NeedsScoreTests extends ESSingleNodeTestCase { public void testNeedsScores() { IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); - PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, - Arrays.asList(SearchScript.CONTEXT, ExecutableScript.CONTEXT)); + Map, List> contexts = new HashMap<>(); + contexts.put(SearchScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ExecutableScript.CONTEXT, Whitelist.BASE_WHITELISTS); + PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts); QueryShardContext shardContext = index.newQueryShardContext(0, null, () -> 0, null); SearchLookup lookup = new SearchLookup(index.mapperService(), shardContext::getForField, null); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index edd600c5664f2..87b1677102635 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -27,7 +27,7 @@ import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Struct; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.api.Augmentation; +import org.elasticsearch.painless.spi.Whitelist; import java.io.IOException; import java.io.PrintStream; @@ -36,7 +36,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -68,8 +67,7 @@ public static void main(String[] args) throws IOException { Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8.name())) { emitGeneratedWarning(indexStream); - List types = new Definition(Collections.singletonList( - WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))). + List types = new Definition(Whitelist.BASE_WHITELISTS). 
allSimpleTypes().stream().sorted(comparing(t -> t.name)).collect(toList()); for (Type type : types) { if (type.clazz.isPrimitive()) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 730dd298f8a54..ea1d2275b3e8d 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.antlr.Walker; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptException; @@ -31,10 +32,8 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.painless.node.SSource.MainMethodReserved; @@ -63,11 +62,10 @@ protected Settings scriptEngineSettings() { /** * Script contexts used to build the script engine. Override to customize which script contexts are available. */ - protected Collection> scriptContexts() { - Collection> contexts = new ArrayList<>(); - contexts.add(SearchScript.CONTEXT); - contexts.add(ExecutableScript.CONTEXT); - + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + contexts.put(SearchScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ExecutableScript.CONTEXT, Whitelist.BASE_WHITELISTS); return contexts; } @@ -92,8 +90,7 @@ public Object exec(String script, Map vars, boolean picky) { public Object exec(String script, Map vars, Map compileParams, Scorer scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + Definition definition = new Definition(Whitelist.BASE_WHITELISTS); ScriptClassInfo scriptClassInfo = new ScriptClassInfo(definition, GenericElasticsearchScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index d8f43fb066867..0795ab7777526 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -37,20 +37,25 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.index.similarity.ScriptedSimilarity; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SimilarityScript; import org.elasticsearch.script.SimilarityWeightScript; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; public class SimilarityScriptTests extends ScriptTestCase { @Override - 
protected Collection> scriptContexts() { - return Arrays.asList(SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT); + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + contexts.put(SimilarityScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(SimilarityWeightScript.CONTEXT, Whitelist.BASE_WHITELISTS); + return contexts; } public void testBasics() throws IOException { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 9e3477b1cfe02..424b0c286ecff 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -33,12 +33,11 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ScriptClassInfo; -import org.elasticsearch.painless.WhitelistLoader; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -50,8 +49,7 @@ * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. */ public class NodeToStringTests extends ESTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public void testEAssignment() { assertToString( diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 2213aea16f6cd..12bbff8b0419e 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -26,6 +26,10 @@ esplugin { extendedPlugins = ['lang-painless'] } +dependencies { + compileOnly project(':modules:lang-painless') +} + integTestCluster { distribution = 'zip' } diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java new file mode 100644 index 0000000000000..9e3bc66e7d58d --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.example.painlesswhitelist; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.SearchScript; + +/** An extension of painless which adds a whitelist. */ +public class ExampleWhitelistExtension implements PainlessExtension { + + private static final Whitelist WHITELIST = + WhitelistLoader.loadFromResourceFiles(ExampleWhitelistExtension.class, "example_whitelist.txt"); + + @Override + public Map, List> getContextWhitelists() { + return Collections.singletonMap(SearchScript.CONTEXT, Collections.singletonList(WHITELIST)); + } +} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java new file mode 100644 index 0000000000000..14f15b383d0c8 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.example.painlesswhitelist; + +/** + * An example of a class to be whitelisted for use by painless scripts + * + * Each of the members and methods below are whitelisted for use in search scripts. + * See example_whitelist.txt. 
+ */ +public class ExampleWhitelistedClass { + + public static final int CONSTANT = 42; + + public int publicMember; + + private int privateMember; + + public ExampleWhitelistedClass(int publicMember, int privateMember) { + this.publicMember = publicMember; + this.privateMember = privateMember; + } + + public int getPrivateMemberAccessor() { + return this.privateMember; + } + + public void setPrivateMemberAccessor(int privateMember) { + this.privateMember = privateMember; + } + + public static void staticMethod() { + // electricity + } + + // example augmentation method + public static int toInt(String x) { + return Integer.parseInt(x); + } +} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java index 877a05391ac77..a4ef5f6f000e1 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java @@ -22,4 +22,5 @@ import org.elasticsearch.plugins.Plugin; public class MyWhitelistPlugin extends Plugin { + // we don't actually need anything here, since whitelists are extended through SPI } diff --git a/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension b/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension new file mode 100644 index 0000000000000..9babd702c8083 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension @@ -0,0 +1 @@ +org.elasticsearch.example.painlesswhitelist.ExampleWhitelistExtension \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt b/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt new file mode 100644 index 0000000000000..7908d35417511 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt @@ -0,0 +1,42 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# This file contains a whitelist for an example class which may be accessed from painless + +class org.elasticsearch.example.painlesswhitelist.ExampleWhitelistedClass { + # constructor + (int, int) + + # static constants and methods look the same as instance members and methods + int CONSTANT + void staticMethod() + + # members lack the parentheses that methods have + int publicMember + + # getter and setter for private member + int getPrivateMemberAccessor() + void setPrivateMemberAccessor(int) +} + +class java.lang.String { + # existing classes can be "augmented" to have additional methods, which take the object + # to operate on as the first argument to a static method + int org.elasticsearch.example.painlesswhitelist.ExampleWhitelistedClass toInt() +} \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml new file mode 100644 index 0000000000000..bbb0b44ef1d45 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml @@ -0,0 +1,26 @@ +# Example test using whitelisted members and methods + +"Whitelisted custom class": + - do: + index: + index: test + type: test + id: 1 + body: { "num1": 1.0 } + - do: + indices.refresh: {} + + - do: + index: test + search: + body: + query: + match_all: {} + script_fields: + sNum1: + script: + source: "def e = new ExampleWhitelistedClass(6, 42); ExampleWhitelistedClass.staticMethod(); return e.publicMember + e.privateMemberAccessor + ExampleWhitelistedClass.CONSTANT + '2'.toInt()" + lang: painless + + - match: { hits.total: 1 } + - match: { hits.hits.0.fields.sNum1.0: 92 } From 125aee6f96fe48489f51448acbf15b4a2ee4ce36 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:34:10 +0100 Subject: [PATCH 21/31] Allow update of `eager_global_ordinals` on `_parent`. (#28014) A bug introduced in #24407 currently prevents `eager_global_ordinals` from being updated. This new approach should fix the issue while still allowing mapping updates to not specify the `_parent` field if it doesn't need updating, which was the goal of #24407.
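The merge rule described above can be sketched in isolation. The following is a minimal, self-contained illustration; ParentField, doMerge and the values used here are stand-ins invented for this sketch, not the real Elasticsearch classes (the actual logic lives in ParentFieldMapper#doMerge):

public class ParentMergeDemo {
    static class ParentField {
        String parentType;                    // null means the update does not configure _parent
        boolean eagerGlobalOrdinals = true;

        boolean active() {
            return parentType != null;
        }
    }

    // Mirrors the fixed behavior: only an update that actually configures
    // _parent replaces the current settings; inactive updates are ignored.
    static void doMerge(ParentField current, ParentField mergeWith) {
        if (mergeWith.active()) {
            current.eagerGlobalOrdinals = mergeWith.eagerGlobalOrdinals;
        }
    }

    public static void main(String[] args) {
        ParentField current = new ParentField();
        current.parentType = "parent_type";

        ParentField unrelatedUpdate = new ParentField();   // leaves _parent unspecified
        doMerge(current, unrelatedUpdate);
        System.out.println(current.eagerGlobalOrdinals);   // true: setting preserved

        ParentField ordsUpdate = new ParentField();
        ordsUpdate.parentType = "parent_type";
        ordsUpdate.eagerGlobalOrdinals = false;
        doMerge(current, ordsUpdate);
        System.out.println(current.eagerGlobalOrdinals);   // false: update now applied
    }
}

Running it prints true then false: an update that leaves _parent unspecified no longer clobbers the existing configuration, while an update that does configure _parent can now flip eager_global_ordinals.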
--- .../index/mapper/ParentFieldMapper.java | 11 +++++----- .../index/mapper/ParentFieldMapperTests.java | 20 +++++++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 73109a3ecd8f9..34eaf569ca949 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -303,15 +303,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; - ParentFieldType currentFieldType = (ParentFieldType) fieldType.clone(); - super.doMerge(mergeWith, updateAllTypes); if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) { throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); } - - if (active()) { - fieldType = currentFieldType; + // If fieldMergeWith is not active it means the user provided a mapping + // update that does not explicitly configure the _parent field, so we + // ignore it. + if (fieldMergeWith.active()) { + super.doMerge(mergeWith, updateAllTypes); } + } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index d0e17b808c596..d21827ee18cea 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; +import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -138,4 +139,23 @@ private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) { return numFieldWithParentPrefix; } + public void testUpdateEagerGlobalOrds() throws IOException { + String parentMapping = XContentFactory.jsonBuilder().startObject().startObject("parent_type") + .endObject().endObject().string(); + String childMapping = XContentFactory.jsonBuilder().startObject().startObject("child_type") + .startObject("_parent").field("type", "parent_type").endObject() + .endObject().endObject().string(); + IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); + indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); + + assertTrue(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); + + String childMappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("child_type") + .startObject("_parent").field("type", "parent_type").field("eager_global_ordinals", false).endObject() + .endObject().endObject().string(); + indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE, false); + + 
assertFalse(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); + } } From 7cd57534e87dc466fe9a6c98bda843038d79c76f Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:34:38 +0100 Subject: [PATCH 22/31] Ignore the `-snapshot` suffix when comparing the Lucene version in the build and the docs. (#27927) Currently if the Lucene version is `X.Y.Z-snapshot-{gitrev}`, then we will expect the docs to have `X.Y.Z-snapshot` as a Lucene version. I would like to change it to `X.Y.Z` so that this doesn't need changing when we move from a snapshot to a final release. --- qa/verify-version-constants/build.gradle | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 111c4ccf20e50..1d31db6898b7b 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -76,10 +76,8 @@ task verifyDocsLuceneVersion { throw new GradleException('Could not find lucene version in docs version file') } String expectedLuceneVersion = VersionProperties.lucene - if (expectedLuceneVersion.contains('-snapshot-')) { - expectedLuceneVersion = expectedLuceneVersion.substring(0, expectedLuceneVersion.lastIndexOf('-')) - expectedLuceneVersion = expectedLuceneVersion.toUpperCase(Locale.ROOT) - } + // remove potential -snapshot-{gitrev} suffix + expectedLuceneVersion -= ~/-snapshot-[0-9a-f]+$/ if (docsLuceneVersion != expectedLuceneVersion) { throw new GradleException("Lucene version in docs [${docsLuceneVersion}] does not match version.properties [${expectedLuceneVersion}]") } From 35de53cec14eb3d849c956bfd11c639e244c155f Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:35:27 +0100 Subject: [PATCH 23/31] Fix casts in HotThreads. (#27578) Even though an overflow would be very unlikely, it's better to use the longs directly in the comparator. 
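To see why the old int casts were unsafe, consider a minimal sketch (hypothetical values, not the actual HotThreads code): a long difference larger than Integer.MAX_VALUE wraps negative when truncated to int, inverting the sort order, while Comparator.comparingLong compares the longs directly.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class CastComparatorDemo {
    public static void main(String[] args) {
        long small = 0L;
        long large = Integer.MAX_VALUE + 1L;   // 2147483648, does not fit in an int

        // Old style: the long difference is truncated to int and wraps negative,
        // so the larger value incorrectly compares as "smaller".
        int broken = (int) (large - small);
        System.out.println(broken);            // -2147483648

        // Fixed style: compare the longs directly via a ToLongFunction-based comparator.
        Comparator<long[]> byCpuTime = Comparator.comparingLong(t -> t[0]);
        List<long[]> hotties = new ArrayList<>();
        hotties.add(new long[] { small });
        hotties.add(new long[] { large });
        hotties.sort(byCpuTime.reversed());    // busiest first, no overflow
        System.out.println(hotties.get(0)[0]); // 2147483648
    }
}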
--- .../elasticsearch/monitor/jvm/HotThreads.java | 36 ++++++++----------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 1714d00abb206..3b6415437f97c 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -35,6 +35,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.ToLongFunction; public class HotThreads { @@ -187,19 +188,19 @@ private String innerDetect() throws Exception { List hotties = new ArrayList<>(threadInfos.values()); final int busiestThreads = Math.min(this.busiestThreads, hotties.size()); // skip that for now - CollectionUtil.introSort(hotties, new Comparator() { - @Override - public int compare(MyThreadInfo o1, MyThreadInfo o2) { - if ("cpu".equals(type)) { - return (int) (o2.cpuTime - o1.cpuTime); - } else if ("wait".equals(type)) { - return (int) (o2.waitedTime - o1.waitedTime); - } else if ("block".equals(type)) { - return (int) (o2.blockedTime - o1.blockedTime); - } - throw new IllegalArgumentException("expected thread type to be either 'cpu', 'wait', or 'block', but was " + type); - } - }); + final ToLongFunction getter; + if ("cpu".equals(type)) { + getter = o -> o.cpuTime; + } else if ("wait".equals(type)) { + getter = o -> o.waitedTime; + } else if ("block".equals(type)) { + getter = o -> o.blockedTime; + } else { + throw new IllegalArgumentException("expected thread type to be either 'cpu', 'wait', or 'block', but was " + type); + } + + CollectionUtil.introSort(hotties, Comparator.comparingLong(getter).reversed()); + // analyse N stack traces for M busiest threads long[] ids = new long[busiestThreads]; for (int i = 0; i < busiestThreads; i++) { @@ -215,14 +216,7 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) { Thread.sleep(threadElementsSnapshotDelay.millis()); } for (int t = 0; t < busiestThreads; t++) { - long time = 0; - if ("cpu".equals(type)) { - time = hotties.get(t).cpuTime; - } else if ("wait".equals(type)) { - time = hotties.get(t).waitedTime; - } else if ("block".equals(type)) { - time = hotties.get(t).blockedTime; - } + long time = getter.applyAsLong(hotties.get(t)); String threadName = null; for (ThreadInfo[] info : allInfos) { if (info != null && info[t] != null) { From 9854255559d5aafa10ac0d2b77927135d5be1ff5 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:36:32 +0100 Subject: [PATCH 24/31] Avoid doing redundant work when checking for self references. (#26927) Currently we test all maps, arrays or iterables. However, in the case that maps contain sub maps for instance, we will test the sub maps again even though the work has already been done for the top-level map. 
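The optimization can be sketched in isolation. Below is a simplified, self-contained version of such a recursive self-reference check (illustrative only, not the real XContentBuilder code): because a single pass already recurses into nested maps, iterables and arrays, the serialization code can pass ensureNoSelfReferences=false down to nested values once the top-level container has been validated.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;

public class SelfReferenceCheckDemo {
    // One recursive pass over the whole graph; an identity-based ancestor set
    // detects cycles regardless of equals()/hashCode() implementations.
    static void ensureNoSelfReferences(Object value, Set<Object> ancestors) {
        final Iterable<?> children;
        if (value instanceof Map) {
            children = ((Map<?, ?>) value).values();
        } else if (value instanceof Iterable) {
            children = (Iterable<?>) value;
        } else if (value instanceof Object[]) {
            children = Arrays.asList((Object[]) value);
        } else {
            return; // leaf value, nothing to walk
        }
        if (ancestors.add(value) == false) {
            throw new IllegalArgumentException("Iterable object is self-referencing itself");
        }
        for (Object child : children) {
            ensureNoSelfReferences(child, ancestors);
        }
        ancestors.remove(value);
    }

    public static void main(String[] args) {
        Map<String, Object> map = new HashMap<>();
        map.put("self", map); // direct cycle
        Set<Object> ancestors = Collections.newSetFromMap(new IdentityHashMap<>());
        try {
            ensureNoSelfReferences(map, ancestors);
        } catch (IllegalArgumentException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}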
Relates #26907 --- .../common/xcontent/XContentBuilder.java | 49 ++++++++++--------- .../common/xcontent/BaseXContentTestCase.java | 1 - 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index f0427ce246669..070510e13ff69 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -773,32 +773,23 @@ public XContentBuilder field(String name, Object value) throws IOException { } public XContentBuilder array(String name, Object... values) throws IOException { - return field(name).values(values); + return field(name).values(values, true); } - XContentBuilder values(Object[] values) throws IOException { + private XContentBuilder values(Object[] values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } - // checks that the array of object does not contain references to itself because - // iterating over entries will cause a stackoverflow error - ensureNoSelfReferences(values); - - startArray(); - for (Object o : values) { - value(o); - } - endArray(); - return this; + return value(Arrays.asList(values), ensureNoSelfReferences); } public XContentBuilder value(Object value) throws IOException { - unknownValue(value); + unknownValue(value, true); return this; } - private void unknownValue(Object value) throws IOException { + private void unknownValue(Object value, boolean ensureNoSelfReferences) throws IOException { if (value == null) { nullValue(); return; @@ -810,11 +801,11 @@ private void unknownValue(Object value) throws IOException { //Path implements Iterable and causes endless recursion and a StackOverFlow if treated as an Iterable here value((Path) value); } else if (value instanceof Map) { - map((Map) value); + map((Map) value, ensureNoSelfReferences); } else if (value instanceof Iterable) { - value((Iterable) value); + value((Iterable) value, ensureNoSelfReferences); } else if (value instanceof Object[]) { - values((Object[]) value); + values((Object[]) value, ensureNoSelfReferences); } else if (value instanceof Calendar) { value((Calendar) value); } else if (value instanceof ReadableInstant) { @@ -863,18 +854,25 @@ public XContentBuilder field(String name, Map values) throws IOE } public XContentBuilder map(Map values) throws IOException { + return map(values, true); + } + + private XContentBuilder map(Map values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error - ensureNoSelfReferences(values); + if (ensureNoSelfReferences) { + ensureNoSelfReferences(values); + } startObject(); for (Map.Entry value : values.entrySet()) { field(value.getKey()); - unknownValue(value.getValue()); + // pass ensureNoSelfReferences=false as we already performed the check at a higher level + unknownValue(value.getValue(), false); } endObject(); return this; @@ -884,7 +882,7 @@ public XContentBuilder field(String name, Iterable values) throws IOException return field(name).value(values); } - private XContentBuilder value(Iterable values) throws IOException { + private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } @@ -895,11 
+893,14 @@ private XContentBuilder value(Iterable values) throws IOException { } else { // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error - ensureNoSelfReferences(values); + if (ensureNoSelfReferences) { + ensureNoSelfReferences(values); + } startArray(); for (Object value : values) { - unknownValue(value); + // pass ensureNoSelfReferences=false as we already performed the check at a higher level + unknownValue(value, false); } endArray(); } @@ -1076,9 +1077,9 @@ private static void ensureNoSelfReferences(final Object value, final Set Iterable it; if (value instanceof Map) { - it = ((Map) value).values(); + it = ((Map) value).values(); } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - it = (Iterable) value; + it = (Iterable) value; } else if (value instanceof Object[]) { it = Arrays.asList((Object[]) value); } else { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e468751cf4aba..e368163a4e95c 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -534,7 +534,6 @@ public void testObjects() throws Exception { final String expected = o.getKey(); assertResult(expected, () -> builder().startObject().field("objects", o.getValue()).endObject()); assertResult(expected, () -> builder().startObject().field("objects").value(o.getValue()).endObject()); - assertResult(expected, () -> builder().startObject().field("objects").values(o.getValue()).endObject()); assertResult(expected, () -> builder().startObject().array("objects", o.getValue()).endObject()); } } From 37249c12f19f04772122be2729aad360063ce37b Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Wed, 6 Dec 2017 11:58:20 -0600 Subject: [PATCH 25/31] [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder Add WKT BBOX parsing support to GeoBoundingBoxQueryBuilder. 
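For orientation, the WKT BBOX coordinate order is (minLon, maxLon, maxLat, minLat), which maps onto the top_left/bottom_right corners used by geo_bounding_box, as the tests below also assert. A tiny stand-alone sketch of that mapping (naive string extraction for illustration only; the real work is done by GeoWKTParser):

public class BboxWktDemo {
    public static void main(String[] args) {
        String wkt = "BBOX (-74.1, -71.12, 40.73, 40.01)";
        // Strip everything except digits, signs, dots and commas, then split.
        String[] c = wkt.replaceAll("[^0-9,.\\-]", "").split(",");
        double minLon = Double.parseDouble(c[0]);  // -74.1
        double maxLon = Double.parseDouble(c[1]);  // -71.12
        double maxLat = Double.parseDouble(c[2]);  //  40.73
        double minLat = Double.parseDouble(c[3]);  //  40.01
        // BBOX lists (minLon, maxLon, maxLat, minLat), which becomes:
        System.out.println("top_left     = [" + minLon + ", " + maxLat + "]"); // [-74.1, 40.73]
        System.out.println("bottom_right = [" + maxLon + ", " + minLat + "]"); // [-71.12, 40.01]
    }
}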
--- .../query-dsl/geo-bounding-box-query.asciidoc | 25 ++++ .../common/geo/parsers/GeoWKTParser.java | 21 ++- .../query/GeoBoundingBoxQueryBuilder.java | 135 +++++++++++------- .../common/geo/GeoWKTShapeParserTests.java | 12 ++ .../GeoBoundingBoxQueryBuilderTests.java | 44 ++++++ 5 files changed, 181 insertions(+), 56 deletions(-) diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index e8db949bbc6b8..a1b427acf2718 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -180,6 +180,31 @@ GET /_search -------------------------------------------------- // CONSOLE +[float] +===== Bounding Box as Well-Known Text (WKT) + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "wkt" : "BBOX (-74.1, -71.12, 40.73, 40.01)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + [float] ===== Geohash diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 005caed53a7e9..38643df017943 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -63,6 +63,12 @@ private GeoWKTParser() {} public static ShapeBuilder parse(XContentParser parser) throws IOException, ElasticsearchParseException { + return parseExpectedType(parser, null); + } + + /** throws an exception if the parsed geometry type does not match the expected shape type */ + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) + throws IOException, ElasticsearchParseException { FastStringReader reader = new FastStringReader(parser.text()); try { // setup the tokenizer; configured to read words w/o numbers @@ -77,7 +83,7 @@ public static ShapeBuilder parse(XContentParser parser) tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType); checkEOF(tokenizer); return builder; } finally { @@ -86,8 +92,14 @@ public static ShapeBuilder parse(XContentParser parser) } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType) + throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); + if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { + if (type.wktName().equals(shapeType.wktName()) == false) { + throw new ElasticsearchParseException("Expected geometry type [{}] but found [{}]", shapeType, type); + } + } switch (type) { case POINT: return parsePoint(stream); @@ -228,9 +240,10 @@ private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape(parseGeometry(stream)); + GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( + parseGeometry(stream, 
GeoShapeType.GEOMETRYCOLLECTION)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream)); + builder.shape(parseGeometry(stream, null)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index c0e57cc45afd9..47dcbaa351454 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -31,7 +31,10 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.geo.builders.EnvelopeBuilder; +import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -62,7 +65,6 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder GeoWKTParser.parseExpectedType(parser, GeoShapeType.POLYGON)); + assertThat(e, hasToString(containsString("Expected geometry type [polygon] but found [point]"))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 133057fb8d026..aeaca328ceb7b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -406,6 +406,50 @@ public void testFromJson() throws IOException { assertEquals(json, GeoExecType.MEMORY, parsed.type()); } + public void testFromWKT() throws IOException { + String wkt = + "{\n" + + " \"geo_bounding_box\" : {\n" + + " \"pin.location\" : {\n" + + " \"wkt\" : \"BBOX (-74.1, -71.12, 40.73, 40.01)\"\n" + + " },\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"type\" : \"MEMORY\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + // toXContent generates the query in geojson only; for now we need to test against the expected + // geojson generated content + String expectedJson = + "{\n" + + " \"geo_bounding_box\" : {\n" + + " \"pin.location\" : {\n" + + " \"top_left\" : [ -74.1, 40.73 ],\n" + + " \"bottom_right\" : [ -71.12, 40.01 ]\n" + + " },\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"type\" : \"MEMORY\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + // parse with wkt + GeoBoundingBoxQueryBuilder parsed = (GeoBoundingBoxQueryBuilder) parseQuery(wkt); + // check the builder's generated geojson content against the expected json output + checkGeneratedJson(expectedJson, parsed); + double delta = 0d; + assertEquals(expectedJson, "pin.location", parsed.fieldName()); + assertEquals(expectedJson, -74.1, parsed.topLeft().getLon(), delta); + assertEquals(expectedJson, 40.73, parsed.topLeft().getLat(), delta); + assertEquals(expectedJson, -71.12, parsed.bottomRight().getLon(), delta); + assertEquals(expectedJson, 40.01, parsed.bottomRight().getLat(), delta); + assertEquals(expectedJson, 1.0, parsed.boost(), delta); + assertEquals(expectedJson, GeoExecType.MEMORY, parsed.type()); + } + @Override 
public void testMustRewrite() throws IOException { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); From b34d6482694ae94da56bd73cee7db85ad629f207 Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Mon, 15 Jan 2018 14:05:06 -0600 Subject: [PATCH 26/31] [GEO] Deprecate field parameter in GeoBoundingBoxQueryBuilder Deprecates unused "field" parameter when parsing geo_bounding_box queries. --- .../index/query/GeoBoundingBoxQueryBuilder.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 47dcbaa351454..a109fe896392c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -37,6 +37,8 @@ import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.GeoPointFieldMapper.GeoPointFieldType; @@ -58,12 +60,15 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder Date: Mon, 15 Jan 2018 18:13:47 -0500 Subject: [PATCH 27/31] TEST: Update logging for testAckedIndexing - Log the response of indexing requests - Correct logging setting for discovery package --- .../org/elasticsearch/discovery/ClusterDisruptionIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 8d21c6306382b..55f5b70e70299 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -81,7 +81,8 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { *

* This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates */ - @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE," + + @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") public void testAckedIndexing() throws Exception { @@ -137,7 +138,7 @@ public void testAckedIndexing() throws Exception { .get(timeout); assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); ackedDocs.put(id, node); - logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node); + logger.trace("[{}] indexed id [{}] through node [{}], response [{}]", name, id, node, response); } catch (ElasticsearchException e) { exceptedExceptions.add(e); final String docId = id; From b75f06b6c475ddc9f82f5b9e7992675a8ad2ae2c Mon Sep 17 00:00:00 2001 From: fbsolo Date: Tue, 16 Jan 2018 00:35:35 -0800 Subject: [PATCH 28/31] [Docs] Changes to ingest.asciidoc (#28212) --- docs/reference/ingest.asciidoc | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index da1164930bc1e..18349beab6ab1 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -3,26 +3,27 @@ [partintro] -- -You can use ingest node to pre-process documents before the actual indexing takes place. -This pre-processing happens by an ingest node that intercepts bulk and index requests, applies the -transformations, and then passes the documents back to the index or bulk APIs. +Use an ingest node to pre-process documents before the actual document indexing happens. +The ingest node intercepts bulk and index requests, it applies transformations, and it then +passes the documents back to the index or bulk APIs. -You can enable ingest on any node or even have dedicated ingest nodes. Ingest is enabled by default -on all nodes. To disable ingest on a node, configure the following setting in the `elasticsearch.yml` file: +All nodes enable ingest by default, so any node can handle ingest tasks. You can also create +dedicated ingest nodes. To disable ingest for a node, configure the following setting in the +elasticsearch.yml file: [source,yaml] -------------------------------------------------- node.ingest: false -------------------------------------------------- -To pre-process documents before indexing, you <> that specifies -a series of <>. Each processor transforms the document in some way. -For example, you may have a pipeline that consists of one processor that removes a field from -the document followed by another processor that renames a field. Configured pipelines are then stored -in the <>. +To pre-process documents before indexing, <> that specifies a series of +<>. Each processor transforms the document in some specific way. For example, a +pipeline might have one processor that removes a field from the document, followed by +another processor that renames a field. The <> then stores +the configured pipelines. -To use a pipeline, you simply specify the `pipeline` parameter on an index or bulk request to -tell the ingest node which pipeline to use. 
For example: +To use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This +way, the ingest node knows which pipeline to use. For example: [source,js] -------------------------------------------------- From 62ab666266fdee4d5df8cb4f82ae2cc985e8a697 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 16 Jan 2018 09:50:06 +0100 Subject: [PATCH 29/31] Fallback to TransportMasterNodeAction for cluster health retries (#28195) ClusterHealthAction does not use the regular retry logic, possibly causing StackOverflowErrors. Relates #28169 --- .../admin/cluster/health/TransportClusterHealthAction.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 60010a9d855ef..598b1a526779e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -125,7 +126,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) @Override public void onNoLongerMaster(String source) { logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); - doExecute(task, request, listener); + // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException + listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); } @Override From bdc766bafe2645e03d0a09ced3b558c32d8d3ae6 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 16 Jan 2018 09:58:58 +0100 Subject: [PATCH 30/31] Never return null from Strings.tokenizeToStringArray (#28224) This method has a different contract than all the other methods in this class, returning null instead of an empty array when receiving a null input. When some methods were switched from delimitedListToStringArray to tokenizeToStringArray, this difference caused unexpected nulls in some places of our code.
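A minimal sketch of the new contract (stand-in code invented for this sketch, not the actual org.elasticsearch.common.Strings class): a null input now yields an empty array, matching delimitedListToStringArray, so call sites can iterate without a null check.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.StringTokenizer;

public class TokenizeNullDemo {
    private static final String[] EMPTY_ARRAY = new String[0];

    static String[] tokenizeToStringArray(String s, String delimiters) {
        if (s == null) {
            return EMPTY_ARRAY; // previously a null leaked out of this method
        }
        StringTokenizer tokenizer = new StringTokenizer(s, delimiters);
        List<String> tokens = new ArrayList<>();
        while (tokenizer.hasMoreTokens()) {
            String token = tokenizer.nextToken().trim();
            if (token.isEmpty() == false) {
                tokens.add(token);
            }
        }
        return tokens.toArray(EMPTY_ARRAY);
    }

    public static void main(String[] args) {
        // e.g. a routing filter setting that was explicitly set to null:
        for (String value : tokenizeToStringArray(null, ",")) {
            System.out.println(value); // never runs, but no NullPointerException either
        }
        System.out.println(Arrays.toString(tokenizeToStringArray(" a, b ,c ", ","))); // [a, b, c]
    }
}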
Relates #28213 --- .../src/main/java/org/elasticsearch/common/Strings.java | 5 ++++- .../allocation/decider/FilterAllocationDeciderTests.java | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java index 6c2fc4e1ec153..02a0852b0a03a 100644 --- a/server/src/main/java/org/elasticsearch/common/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/Strings.java @@ -474,6 +474,9 @@ public static String[] split(String toSplit, String delimiter) { * @see #delimitedListToStringArray */ public static String[] tokenizeToStringArray(final String s, final String delimiters) { + if (s == null) { + return EMPTY_ARRAY; + } return toStringArray(tokenizeToCollection(s, delimiters, ArrayList::new)); } @@ -536,7 +539,7 @@ public static String[] delimitedListToStringArray(String str, String delimiter) */ public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) { if (str == null) { - return new String[0]; + return EMPTY_ARRAY; } if (delimiter == null) { return new String[]{str}; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index c4105771229bc..8381f2f960b75 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -194,6 +194,14 @@ public void testInvalidIPFilter() { assertEquals("invalid IP address [" + invalidIP + "] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } + public void testNull() { + Setting filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, + IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + + IndexMetaData.builder("test") + .settings(settings(Version.CURRENT).putNull(filterSetting.getKey() + "name")).numberOfShards(2).numberOfReplicas(0).build(); + } + public void testWildcardIPFilter() { String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip"); Setting filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, From c470061e228339f209bf91dd74f1d55765f245e3 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 16 Jan 2018 10:50:07 +0100 Subject: [PATCH 31/31] Fix eclipse build. 
(#28236) Relates #28191 --- libs/elasticsearch-core/src/main/eclipse-build.gradle | 2 ++ libs/elasticsearch-core/src/test/eclipse-build.gradle | 6 ++++++ settings.gradle | 5 +++++ 3 files changed, 13 insertions(+) create mode 100644 libs/elasticsearch-core/src/main/eclipse-build.gradle create mode 100644 libs/elasticsearch-core/src/test/eclipse-build.gradle diff --git a/libs/elasticsearch-core/src/main/eclipse-build.gradle b/libs/elasticsearch-core/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..9c84a4d6bd84b --- /dev/null +++ b/libs/elasticsearch-core/src/main/eclipse-build.gradle @@ -0,0 +1,2 @@ +// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' diff --git a/libs/elasticsearch-core/src/test/eclipse-build.gradle b/libs/elasticsearch-core/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..f43f019941bb2 --- /dev/null +++ b/libs/elasticsearch-core/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ +// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:elasticsearch-core') +} diff --git a/settings.gradle b/settings.gradle index b811a46ea42c7..fe2fd6e67503b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -108,6 +108,7 @@ if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects // for server-src and server-tests projects << 'server-tests' + projects << 'libs:elasticsearch-core-tests' } include projects.toArray(new String[0]) @@ -125,6 +126,10 @@ if (isEclipse) { project(":server").buildFileName = 'eclipse-build.gradle' project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') project(":server-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/main') + project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/test') + project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' } /**