From f2db2a02e20f208afd3c90f9c11e2cc33b1f982e Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 12 Jan 2018 19:06:04 -0500
Subject: [PATCH 01/30] Truncate tlog cli should assign global checkpoint
 (#28192)

We are targeting to always have a safe index once the recovery is done. This
invariant does not hold if the translog is manually truncated by users because
the truncate translog cli resets the global checkpoint to unassigned. This
commit assigns the global checkpoint to the max_seqno of the last commit when
truncating translog. We can only safely do it because the truncate translog
command will generate a new history uuid for that shard. With a new history
UUID, sequence-based recovery between that shard and other old shards will be
disabled.

Relates #28181
---
 .../translog/TruncateTranslogCommand.java  | 18 ++++++++++++++----
 .../index/translog/TruncateTranslogIT.java | 17 +++++++++++++++++
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
index d9b77f841ed09..222e3e13d65e1 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
@@ -132,9 +132,19 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th
         }
         // Retrieve the generation and UUID from the existing data
-        commitData = commits.get(commits.size() - 1).getUserData();
+        commitData = new HashMap<>(commits.get(commits.size() - 1).getUserData());
         String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
         String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
+        final long globalCheckpoint;
+        // In order to have a safe commit invariant, we have to assign the global checkpoint to the max_seqno of the last commit.
+        // We can only safely do it because we will generate a new history uuid for this shard.
+        if (commitData.containsKey(SequenceNumbers.MAX_SEQ_NO)) {
+            globalCheckpoint = Long.parseLong(commitData.get(SequenceNumbers.MAX_SEQ_NO));
+            // Also advances the local checkpoint of the last commit to its max_seqno.
+            commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(globalCheckpoint));
+        } else {
+            globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;
+        }
         if (translogGeneration == null || translogUUID == null) {
             throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                 translogGeneration, translogUUID);
@@ -153,7 +163,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th
             // Write empty checkpoint and translog to empty files
             long gen = Long.parseLong(translogGeneration);
             int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
-            writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);
+            writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen, globalCheckpoint);
 
             terminal.println("Removing existing translog files");
             IOUtils.rm(translogFiles.toArray(new Path[]{}));
@@ -190,9 +200,9 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th
     }
 
     /** Write a checkpoint file to the given location with the given generation */
-    public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException {
+    static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration, long globalCheckpoint) throws IOException {
         Checkpoint emptyCheckpoint = Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration,
-            SequenceNumbers.UNASSIGNED_SEQ_NO, translogGeneration);
+            globalCheckpoint, translogGeneration);
         Checkpoint.write(FileChannel::open, filename, emptyCheckpoint,
             StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
         // fsync with metadata here to make sure.
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java
index d98359cdd06a0..029ed50fb2851 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequestBuilder;
@@ -48,6 +49,7 @@
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.MockEngineFactoryPlugin;
+import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
@@ -74,6 +76,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0)
@@ -214,6 +217,10 @@ public void testCorruptTranslogTruncation() throws Exception {
         final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get("test").stream()
             .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get();
         assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0));
+        // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit.
+        final SeqNoStats seqNoStats = getSeqNoStats("test", 0);
+        assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
+        assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
     }
 
     public void testCorruptTranslogTruncationOfReplica() throws Exception {
@@ -316,6 +323,10 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception {
             .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get();
         // the replica translog was disabled so it doesn't know what the global checkpoint is and thus can't do ops based recovery
         assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0));
+        // Ensure that the global checkpoint and local checkpoint are restored from the max seqno of the last commit.
+        final SeqNoStats seqNoStats = getSeqNoStats("test", 0);
+        assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
+        assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
     }
 
     private Set getTranslogDirs(String indexName) throws IOException {
@@ -360,4 +371,10 @@ private static void disableTranslogFlush(String index) {
         client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
     }
 
+    private SeqNoStats getSeqNoStats(String index, int shardId) {
+        final ShardStats[] shardStats = client().admin().indices()
+            .prepareStats(index).get()
+            .getIndices().get(index).getShards();
+        return shardStats[shardId].getSeqNoStats();
+    }
 }

From 095f31b80ef22ea4b29a0673774158d452c454cc Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 12 Jan 2018 19:09:31 -0500
Subject: [PATCH 02/30] Replica start peer recovery with safe commit (#28181)

Today a replica starts a peer-recovery with the last commit. If the last
commit is not a safe commit, a replica will immediately fall back to the
file-based sync which is more expensive than the sequence-based recovery.
This commit modifies the peer-recovery in replica to start with a safe
commit. Moreover, we can keep the existing translog on the target if the
recovery is a sequence-based recovery.
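A minimal sketch of the safe-commit selection this relies on, for orientation.
This is illustrative only, not the actual CombinedDeletionPolicy.findSafeCommitPoint
source used in the diff below; the oldest-commit fallback is an assumption of the
sketch:

    import org.apache.lucene.index.IndexCommit;

    import java.io.IOException;
    import java.util.List;

    final class SafeCommitSketch {
        // Key under which Elasticsearch records a commit's max_seqno in the commit user data.
        static final String MAX_SEQ_NO = "max_seq_no";

        // Walk the commits from newest to oldest and return the first one whose max_seqno is
        // at or below the global checkpoint: every operation in that commit has been durably
        // acknowledged by the whole replication group, so recovering from it never requires
        // rolling back an acknowledged write.
        static IndexCommit findSafeCommit(List<IndexCommit> commits, long globalCheckpoint) throws IOException {
            for (int i = commits.size() - 1; i >= 0; i--) {
                final IndexCommit commit = commits.get(i);
                final long maxSeqNo = Long.parseLong(commit.getUserData().get(MAX_SEQ_NO));
                if (maxSeqNo <= globalCheckpoint) {
                    return commit;
                }
            }
            return commits.get(0); // assumption: fall back to the oldest commit
        }
    }

Starting from such a commit, getStartingSeqNo in the diff below can offer the
primary the commit's local checkpoint plus one and replay only the missing
operations from the retained translog instead of copying files.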
Relates #10708
---
 .../elasticsearch/index/engine/Engine.java     |   5 +
 .../index/engine/InternalEngine.java           |   9 ++
 .../elasticsearch/index/shard/IndexShard.java  |  16 ++-
 .../index/shard/StoreRecovery.java             |   2 +-
 .../recovery/PeerRecoveryTargetService.java    |  14 +-
 ...ryPrepareForTranslogOperationsRequest.java  |  47 ++++---
 .../recovery/RecoverySourceHandler.java        |  11 +-
 .../indices/recovery/RecoveryTarget.java       |  10 +-
 .../recovery/RecoveryTargetHandler.java        |   6 +-
 .../recovery/RemoteRecoveryTargetHandler.java  |   4 +-
 .../RecoveryDuringReplicationTests.java        |  29 +++-
 .../index/shard/IndexShardTests.java           |  11 +-
 .../PeerRecoveryTargetServiceTests.java        | 128 ++++++------------
 .../recovery/RecoverySourceHandlerTests.java   |   2 +-
 .../indices/recovery/RecoveryTests.java        |  35 +++++
 15 files changed, 196 insertions(+), 133 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index 5de7062ab18ee..b0e2654e7f2fb 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -1512,6 +1512,11 @@ public interface Warmer {
      */
     public abstract Engine recoverFromTranslog() throws IOException;
 
+    /**
+     * Do not replay translog operations, but make the engine ready.
+     */
+    public abstract void skipTranslogRecovery();
+
     /**
      * Returns true iff this engine is currently recovering from translog.
      */
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 77b8275277079..1b7b891efd6ff 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -401,6 +401,15 @@ public InternalEngine recoverFromTranslog() throws IOException {
         return this;
     }
 
+    @Override
+    public void skipTranslogRecovery() {
+        if (openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
+            throw new IllegalStateException("Can't skip translog recovery with open mode: " + openMode);
+        }
+        assert pendingTranslogRecovery.get() : "translogRecovery is not pending but should be";
+        pendingTranslogRecovery.set(false); // we are good - now we can commit
+    }
+
     private IndexCommit getStartingCommitPoint() throws IOException {
         if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
             final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint();
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 4c6c6a17c234d..3832cd0ae2055 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -1304,9 +1304,20 @@ public void openIndexAndCreateTranslog(boolean forceNewHistoryUUID, long globalC
      * opens the engine on top of the existing lucene engine and translog.
      * Operations from the translog will be replayed to bring lucene up to date.
      **/
-    public void openIndexAndTranslog() throws IOException {
+    public void openIndexAndRecoveryFromTranslog() throws IOException {
         assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.EXISTING_STORE;
         innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, false);
+        getEngine().recoverFromTranslog();
+    }
+
+    /**
+     * Opens the engine on top of the existing lucene engine and translog.
+ * The translog is kept but its operations won't be replayed. + */ + public void openIndexAndSkipTranslogRecovery() throws IOException { + assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.PEER; + innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, false); + getEngine().skipTranslogRecovery(); } private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, final boolean forceNewHistoryUUID) throws IOException { @@ -1339,13 +1350,12 @@ private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, fi globalCheckpointTracker.updateGlobalCheckpointOnReplica(Translog.readGlobalCheckpoint(translogConfig.getTranslogPath()), "read from translog checkpoint"); } - Engine newEngine = createNewEngine(config); + createNewEngine(config); verifyNotClosed(); if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { // We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive, // we still give sync'd flush a chance to run: active.set(true); - newEngine.recoverFromTranslog(); } assertSequenceNumbersInCommit(); assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 6bc1ce2882c92..81ffbea642c58 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -401,7 +401,7 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe logger.debug("failed to list file details", e); } if (indexShouldExists) { - indexShard.openIndexAndTranslog(); + indexShard.openIndexAndRecoveryFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); } else { indexShard.createIndexAndTranslog(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index ba5dc5c60f29f..88b0f23d72a99 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -21,6 +21,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; @@ -39,6 +41,7 @@ import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.CombinedDeletionPolicy; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -60,6 +63,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.List; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -108,8 +112,8 @@ public PeerRecoveryTargetService(Settings settings, ThreadPool threadPool, 
Trans FileChunkTransportRequestHandler()); transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler()); - transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, ThreadPool - .Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); + transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, ThreadPool.Names.GENERIC, + RecoveryPrepareForTranslogOperationsRequest::new, new PrepareForTranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new @@ -353,7 +357,9 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove public static long getStartingSeqNo(final RecoveryTarget recoveryTarget) { try { final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation()); - final SequenceNumbers.CommitInfo seqNoStats = recoveryTarget.store().loadSeqNoInfo(null); + final List existingCommits = DirectoryReader.listCommits(recoveryTarget.store().directory()); + final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, globalCheckpoint); + final SequenceNumbers.CommitInfo seqNoStats = recoveryTarget.store().loadSeqNoInfo(safeCommit); if (seqNoStats.maxSeqNo <= globalCheckpoint) { assert seqNoStats.localCheckpoint <= globalCheckpoint; /* @@ -387,7 +393,7 @@ class PrepareForTranslogOperationsRequestHandler implements TransportRequestHand public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.target().prepareForTranslogOperations(request.totalTranslogOps()); + recoveryRef.target().prepareForTranslogOperations(request.deleteLocalTranslog(), request.totalTranslogOps()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index 61cd986a1aef4..ae8c7472f89b4 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -28,19 +28,33 @@ import java.io.IOException; -public class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { +class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { - private long recoveryId; - private ShardId shardId; - private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; + private final long recoveryId; + private final ShardId shardId; + private final int totalTranslogOps; + private final boolean deleteLocalTranslog; - public RecoveryPrepareForTranslogOperationsRequest() { - } - - RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) { + RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, boolean deleteLocalTranslog) { this.recoveryId = recoveryId; 
this.shardId = shardId; this.totalTranslogOps = totalTranslogOps; + this.deleteLocalTranslog = deleteLocalTranslog; + } + + RecoveryPrepareForTranslogOperationsRequest(StreamInput in) throws IOException { + super.readFrom(in); + recoveryId = in.readLong(); + shardId = ShardId.readShardId(in); + totalTranslogOps = in.readVInt(); + if (in.getVersion().before(Version.V_6_0_0_alpha1)) { + in.readLong(); // maxUnsafeAutoIdTimestamp + } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + deleteLocalTranslog = in.readBoolean(); + } else { + deleteLocalTranslog = true; + } } public long recoveryId() { @@ -55,15 +69,11 @@ public int totalTranslogOps() { return totalTranslogOps; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); - totalTranslogOps = in.readVInt(); - if (in.getVersion().before(Version.V_6_0_0_alpha1)) { - in.readLong(); // maxUnsafeAutoIdTimestamp - } + /** + * Whether or not the recover target should delete its local translog + */ + boolean deleteLocalTranslog() { + return deleteLocalTranslog; } @Override @@ -75,5 +85,8 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_6_0_0_alpha1)) { out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp } + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeBoolean(deleteLocalTranslog); + } } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 7afe6c977da21..3ee9b953757c3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -150,9 +150,9 @@ public RecoveryResponse recoverToTarget() throws IOException { final long startingSeqNo; final long requiredSeqNoRangeStart; - final boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && + final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery(); - if (isSequenceNumberBasedRecoveryPossible) { + if (isSequenceNumberBasedRecovery) { logger.trace("performing sequence numbers based recovery. 
starting at [{}]", request.startingSeqNo()); startingSeqNo = request.startingSeqNo(); requiredSeqNoRangeStart = startingSeqNo; @@ -188,7 +188,8 @@ public RecoveryResponse recoverToTarget() throws IOException { runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId())); try { - prepareTargetForTranslog(translog.estimateTotalOperationsFromMinSeq(startingSeqNo)); + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, translog.estimateTotalOperationsFromMinSeq(startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); } @@ -421,13 +422,13 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO } } - void prepareTargetForTranslog(final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException { StopWatch stopWatch = new StopWatch().start(); logger.trace("recovery [phase1]: prepare remote engine for translog"); final long startEngineStart = stopWatch.totalTime().millis(); // Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) of tombstone deletes. - cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps)); + cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(createNewTranslog, totalTranslogOps)); stopWatch.stop(); response.startTime = stopWatch.totalTime().millis() - startEngineStart; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index d383891345818..1bbcb9efa9644 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -362,10 +362,14 @@ private void ensureRefCount() { /*** Implementation of {@link RecoveryTargetHandler } */ @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { state().getTranslog().totalOperations(totalTranslogOps); - // TODO: take the local checkpoint from store as global checkpoint, once we know it's safe - indexShard().openIndexAndCreateTranslog(false, SequenceNumbers.UNASSIGNED_SEQ_NO); + if (createNewTranslog) { + // TODO: Assigns the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 + indexShard().openIndexAndCreateTranslog(false, SequenceNumbers.UNASSIGNED_SEQ_NO); + } else { + indexShard().openIndexAndSkipTranslogRecovery(); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index e7403986dc233..736d602044656 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -32,10 +32,10 @@ public interface RecoveryTargetHandler { /** * Prepares the target to receive translog operations, after all file have been copied - * - * @param totalTranslogOps total translog operations expected to be sent + * 
@param createNewTranslog whether or not to delete the local translog on the target + * @param totalTranslogOps total translog operations expected to be sent */ - void prepareForTranslogOperations(int totalTranslogOps) throws IOException; + void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException; /** * The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 279bec186a433..4ea2be0e72659 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -76,9 +76,9 @@ public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportSe } @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps), + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, createNewTranslog), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 2bf7de6b94a82..881eb16d619d0 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -31,7 +31,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; @@ -226,7 +228,6 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { final IndexShard oldPrimary = shards.getPrimary(); final IndexShard newPrimary = shards.getReplicas().get(0); final IndexShard replica = shards.getReplicas().get(1); - boolean expectSeqNoRecovery = true; if (randomBoolean()) { // simulate docs that were inflight when primary failed, these will be rolled back final int rollbackDocs = randomIntBetween(1, 5); @@ -239,7 +240,6 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { } if (randomBoolean()) { oldPrimary.flush(new FlushRequest(index.getName())); - expectSeqNoRecovery = false; } } @@ -252,9 +252,30 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { equalTo(totalDocs - 1L)); // index some more - totalDocs += shards.indexDocs(randomIntBetween(0, 5)); + int moreDocs = shards.indexDocs(randomIntBetween(0, 5)); + totalDocs += moreDocs; + + // As a replica keeps a safe commit, the file-based 
recovery only happens if the required translog + // for the sequence based recovery are not fully retained and extra documents were added to the primary. + boolean expectSeqNoRecovery = (moreDocs == 0 || randomBoolean()); + int uncommittedOpsOnPrimary = 0; + if (expectSeqNoRecovery == false) { + IndexMetaData.Builder builder = IndexMetaData.builder(newPrimary.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder().put(newPrimary.indexSettings().getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + ); + newPrimary.indexSettings().updateIndexMetaData(builder.build()); + newPrimary.onSettingsChanged(); + shards.syncGlobalCheckpoint(); + newPrimary.flush(new FlushRequest()); + uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); + totalDocs += uncommittedOpsOnPrimary; + } if (randomBoolean()) { + uncommittedOpsOnPrimary = 0; + shards.syncGlobalCheckpoint(); newPrimary.flush(new FlushRequest()); } @@ -269,7 +290,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs)); } else { assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs)); + assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedOpsOnPrimary)); } // roll back the extra ops in the replica diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 48887aa4c11c7..cd75c7a08fbc3 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -2109,7 +2108,7 @@ public void testShardActiveDuringInternalRecovery() throws IOException { shard.prepareForIndexRecovery(); // Shard is still inactive since we haven't started recovering yet assertFalse(shard.isActive()); - shard.openIndexAndTranslog(); + shard.openIndexAndRecoveryFromTranslog(); // Shard should now be active since we did recover: assertTrue(shard.isActive()); closeShards(shard); @@ -2137,8 +2136,8 @@ public void testShardActiveDuringPeerRecovery() throws IOException { new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> { }) { @Override - public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(totalTranslogOps); + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { + super.prepareForTranslogOperations(createNewTranslog, totalTranslogOps); // Shard is still inactive since we haven't started recovering yet assertFalse(replica.isActive()); @@ -2186,8 +2185,8 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { }) { // we're only checking that listeners are called when the engine is open, before there is no point @Override - public void prepareForTranslogOperations(int 
totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(totalTranslogOps); + public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { + super.prepareForTranslogOperations(createNewTranslog, totalTranslogOps); assertListenerCalled.accept(replica); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index f691cfd0238d4..31521e33f21b6 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -19,103 +19,63 @@ package org.elasticsearch.indices.recovery; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.index.translog.TranslogWriter; - -import java.io.IOException; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { public void testGetStartingSeqNo() throws Exception { - IndexShard replica = newShard(false); - final AtomicReference translogLocation = new AtomicReference<>(); - RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null) { - @Override - Path translogLocation() { - return translogLocation.get(); - } - }; + final IndexShard replica = newShard(false); try { - recoveryEmptyReplica(replica); - int docs = randomIntBetween(1, 10); - final String index = replica.shardId().getIndexName(); - long seqNo = 0; - for (int i = 0; i < docs; i++) { - replica.applyIndexOperationOnReplica(seqNo++, 1, VersionType.EXTERNAL, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON), - update -> {}); - if (rarely()) { - // insert a gap - seqNo++; + // Empty store + { + recoveryEmptyReplica(replica); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); + recoveryTarget.decRef(); + } + // Last commit is good - use it. 
+ final long initDocs = scaledRandomIntBetween(1, 10); + { + for (int i = 0; i < initDocs; i++) { + indexDoc(replica, "doc", Integer.toString(i)); + if (randomBoolean()) { + flushShard(replica); + } } + flushShard(replica); + replica.updateGlobalCheckpointOnReplica(initDocs - 1, "test"); + replica.getTranslog().sync(); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(initDocs)); + recoveryTarget.decRef(); + } + // Global checkpoint does not advance, last commit is not good - use the previous commit + final int moreDocs = randomIntBetween(1, 10); + { + for (int i = 0; i < moreDocs; i++) { + indexDoc(replica, "doc", Long.toString(i)); + if (randomBoolean()) { + flushShard(replica); + } + } + flushShard(replica); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(initDocs)); + recoveryTarget.decRef(); + } + // Advances the global checkpoint, a safe commit also advances + { + replica.updateGlobalCheckpointOnReplica(initDocs + moreDocs - 1, "test"); + replica.getTranslog().sync(); + final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(initDocs + moreDocs)); + recoveryTarget.decRef(); } - - final long maxSeqNo = replica.seqNoStats().getMaxSeqNo(); - final long localCheckpoint = replica.getLocalCheckpoint(); - - translogLocation.set(replica.getTranslog().location()); - - final Translog translog = replica.getTranslog(); - final String translogUUID = translog.getTranslogUUID(); - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); - - translogLocation.set(writeTranslog(replica.shardId(), translogUUID, translog.currentFileGeneration(), maxSeqNo - 1)); - - // commit is good, global checkpoint is at least max *committed* which is NO_OPS_PERFORMED - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); - - replica.flush(new FlushRequest()); - - translogLocation.set(replica.getTranslog().location()); - - // commit is not good, global checkpoint is below max - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - - translogLocation.set(writeTranslog(replica.shardId(), translogUUID, translog.currentFileGeneration(), maxSeqNo)); - - // commit is good, global checkpoint is above max - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1)); } finally { closeShards(replica); - recoveryTarget.decRef(); } } - - private Path writeTranslog( - final ShardId shardId, - final String translogUUID, - final long generation, - final long globalCheckpoint - ) throws IOException { - final Path tempDir = createTempDir(); - final Path resolve = tempDir.resolve(Translog.getFilename(generation)); - Files.createFile(tempDir.resolve(Translog.CHECKPOINT_FILE_NAME)); - try (TranslogWriter ignored = TranslogWriter.create( - shardId, - translogUUID, - generation, - resolve, - FileChannel::open, - TranslogConfig.DEFAULT_BUFFER_SIZE, generation, globalCheckpoint, () -> globalCheckpoint, () -> generation)) {} - return tempDir; - } - } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java 
index 4963c1b74a53f..7ab6925ce57b9 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -423,7 +423,7 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO
         }
 
         @Override
-        void prepareTargetForTranslog(final int totalTranslogOps) throws IOException {
+        void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException {
             prepareTargetForTranslogCalled.set(true);
         }
 
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
index 85dc3a5fc3906..2089c36d06bc0 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
@@ -41,6 +41,7 @@
 import org.elasticsearch.index.replication.RecoveryDuringReplicationTests;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.translog.SnapshotMatchers;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogConfig;
 
@@ -271,4 +272,38 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception {
         assertThat(maxSeqNo, lessThanOrEqualTo(globalCheckpoint));
         closeShards(primaryShard, replicaShard);
     }
+
+    public void testSequenceBasedRecoveryKeepsTranslog() throws Exception {
+        try (ReplicationGroup shards = createGroup(1)) {
+            shards.startAll();
+            final IndexShard replica = shards.getReplicas().get(0);
+            final int initDocs = scaledRandomIntBetween(0, 20);
+            int uncommittedDocs = 0;
+            for (int i = 0; i < initDocs; i++) {
+                shards.indexDocs(1);
+                uncommittedDocs++;
+                if (randomBoolean()) {
+                    shards.syncGlobalCheckpoint();
+                    shards.flush();
+                    uncommittedDocs = 0;
+                }
+            }
+            shards.removeReplica(replica);
+            final int moreDocs = shards.indexDocs(scaledRandomIntBetween(0, 20));
+            if (randomBoolean()) {
+                shards.flush();
+            }
+            replica.close("test", randomBoolean());
+            replica.store().close();
+            final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
+            shards.recoverReplica(newReplica);
+
+            try (Translog.Snapshot snapshot = newReplica.getTranslog().newSnapshot()) {
+                assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs));
+            }
+            assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs));
+            assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty());
+        }
+    }
+
 }

From 82722ebad362968fb316116b581945479137b5b6 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 12 Jan 2018 20:09:34 -0500
Subject: [PATCH 03/30] TEST: init unassigned gcp in testAcquireIndexCommit

The global checkpoint should be assigned to unassigned rather than 0. If a
single document is indexed and the global checkpoint is initialized with 0,
the first commit is safe, which the test does not expect.
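To make the arithmetic behind this concrete, a short sketch of the check the
test depends on (the -2 value is the UNASSIGNED_SEQ_NO constant in
SequenceNumbers):

    // After indexing one document (seq_no 0) and committing, the commit's user data
    // records max_seqno = 0. The safe-commit check is "maxSeqNo <= globalCheckpoint":
    long maxSeqNo = 0L;

    long globalCheckpoint = 0L;                            // initialized with 0...
    assert maxSeqNo <= globalCheckpoint;                   // ...the first commit is already "safe"

    globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;  // -2, i.e. unassigned
    assert (maxSeqNo <= globalCheckpoint) == false;        // the commit stays unsafe until the
                                                           // test advances the checkpoint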
Relates #28038 --- .../org/elasticsearch/index/engine/InternalEngineTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 9755304d2f1eb..a508d691ed3a6 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4324,7 +4324,7 @@ public void testConcurrentAppendUpdateAndRefresh() throws InterruptedException, public void testAcquireIndexCommit() throws Exception { IOUtils.close(engine, store); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { int numDocs = between(1, 20); for (int i = 0; i < numDocs; i++) { From fafdb8d9e3ea741150d09503c73b3d6a2f28dac5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 13 Jan 2018 11:43:15 -0500 Subject: [PATCH 04/30] AwaitsFix #testRecoveryAfterPrimaryPromotion Relates #28209 --- .../index/replication/RecoveryDuringReplicationTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 881eb16d619d0..77576426252d9 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -215,6 +215,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { } @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28209") public void testRecoveryAfterPrimaryPromotion() throws Exception { try (ReplicationGroup shards = createGroup(2)) { shards.startAll(); From e44e34f42a127fe90004006b6f3ceec355166841 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 12 Jan 2018 21:01:35 -0500 Subject: [PATCH 05/30] Rename deleteLocalTranslog to createNewTranslog We introduced a new option `createNewTranslog` in #28181. However, we named that parameter as deleteLocalTranslog in other places. This commit makes sure to have a consistent naming in these places. 
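For reference, the renamed flag drives this branch on the recovery target
(condensed from the RecoveryTarget change in patch 02, with comments added
here):

    if (createNewTranslog) {
        // File-based recovery: the local translog does not match the newly copied files,
        // so throw it away and start an empty one.
        indexShard().openIndexAndCreateTranslog(false, SequenceNumbers.UNASSIGNED_SEQ_NO);
    } else {
        // Sequence-based recovery: the local translog is still valid. Keep it and skip
        // replaying it; the primary re-sends the missing operations instead.
        indexShard().openIndexAndSkipTranslogRecovery();
    }

The new name describes what the target does rather than what it deletes, so it
reads the same way at the handler and at the call site.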
Relates #28181
---
 .../recovery/PeerRecoveryTargetService.java    |  2 +-
 ...eryPrepareForTranslogOperationsRequest.java | 18 +++++++++---------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
index 88b0f23d72a99..5920e286aa194 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
@@ -393,7 +393,7 @@ class PrepareForTranslogOperationsRequestHand
         public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
             try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
                 )) {
-                recoveryRef.target().prepareForTranslogOperations(request.deleteLocalTranslog(), request.totalTranslogOps());
+                recoveryRef.target().prepareForTranslogOperations(request.createNewTranslog(), request.totalTranslogOps());
             }
             channel.sendResponse(TransportResponse.Empty.INSTANCE);
         }
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
index ae8c7472f89b4..2b0220b265990 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
@@ -33,13 +33,13 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
     private final long recoveryId;
     private final ShardId shardId;
     private final int totalTranslogOps;
-    private final boolean deleteLocalTranslog;
+    private final boolean createNewTranslog;
 
-    RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, boolean deleteLocalTranslog) {
+    RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, boolean createNewTranslog) {
         this.recoveryId = recoveryId;
         this.shardId = shardId;
         this.totalTranslogOps = totalTranslogOps;
-        this.deleteLocalTranslog = deleteLocalTranslog;
+        this.createNewTranslog = createNewTranslog;
     }
 
     RecoveryPrepareForTranslogOperationsRequest(StreamInput in) throws IOException {
@@ -51,9 +51,9 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
             in.readLong(); // maxUnsafeAutoIdTimestamp
         }
         if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
-            deleteLocalTranslog = in.readBoolean();
+            createNewTranslog = in.readBoolean();
         } else {
-            deleteLocalTranslog = true;
+            createNewTranslog = true;
         }
     }
 
@@ -70,10 +70,10 @@ public int totalTranslogOps() {
     }
 
     /**
-     * Whether or not the recover target should delete its local translog
+     * Whether or not the recovery target should create a new local translog
      */
-    boolean deleteLocalTranslog() {
-        return deleteLocalTranslog;
+    boolean createNewTranslog() {
+        return createNewTranslog;
     }
 
     @Override
@@ -86,7 +86,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp
         }
         if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
-            out.writeBoolean(deleteLocalTranslog);
+            out.writeBoolean(createNewTranslog);
         }
     }
 }

From 0151c1565d2f2fe2518b011bba08125fafb6c616 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 12 Jan 2018 21:02:31 -0500
Subject: [PATCH 06/30] Backport replica rollback to 6.2 (#28181)

Relates #28181
---
 .../recovery/RecoveryPrepareForTranslogOperationsRequest.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
index 2b0220b265990..ce96013d343e2 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
@@ -50,7 +50,7 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
         if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
             in.readLong(); // maxUnsafeAutoIdTimestamp
         }
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
             createNewTranslog = in.readBoolean();
         } else {
             createNewTranslog = true;
@@ -82,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(recoveryId);
         shardId.writeTo(out);
         out.writeVInt(totalTranslogOps);
-        if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
+        if (out.getVersion().before(Version.V_6_2_0)) {
             out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp
         }
         if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
             out.writeBoolean(createNewTranslog);
         }
     }

From 9774ba35a18115ee62403d312ea74c4178821c54 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Sat, 13 Jan 2018 14:10:23 -0500
Subject: [PATCH 07/30] Correct backport replica rollback to 6.2 (#28181)

The previous backport was not correct.

Relates #28181
---
 .../recovery/RecoveryPrepareForTranslogOperationsRequest.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
index ce96013d343e2..28df2897d9778 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java
@@ -82,10 +82,10 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(recoveryId);
         shardId.writeTo(out);
         out.writeVInt(totalTranslogOps);
-        if (out.getVersion().before(Version.V_6_2_0)) {
+        if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
             out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp
         }
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
             out.writeBoolean(createNewTranslog);
         }
     }

From fbb840b5c870b238163b3e4726d490e2b6711c23 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Sat, 13 Jan 2018 21:59:26 -0500
Subject: [PATCH 08/30] TEST: Tightens file-based condition in peer-recovery

As a replica always keeps a safe commit and starts peer-recovery with that
commit, file-based recovery only happens if new operations are added to the
primary and the required translog is not fully retained. In the test, we
tried to produce this condition by flushing a new commit in order to trim
all translog.
However, if the new global checkpoint is not persisted yet, we will keep two
commits and not trim translog. This commit tightens the file-based condition
in the test by waiting for the global checkpoint to be persisted properly on
the new primary before flushing.

Close #28209
Relates #28181
---
 .../index/replication/RecoveryDuringReplicationTests.java | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index 77576426252d9..aa97c2049915f 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -215,7 +215,6 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception {
     }
 
     @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28209")
     public void testRecoveryAfterPrimaryPromotion() throws Exception {
         try (ReplicationGroup shards = createGroup(2)) {
             shards.startAll();
@@ -268,7 +267,12 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception {
             );
             newPrimary.indexSettings().updateIndexMetaData(builder.build());
             newPrimary.onSettingsChanged();
-            shards.syncGlobalCheckpoint();
+            // Make sure the global checkpoint on the new primary is persisted properly,
+            // otherwise the deletion policy won't trim translog
+            assertBusy(() -> {
+                shards.syncGlobalCheckpoint();
+                assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo()));
+            });
             newPrimary.flush(new FlushRequest());
             uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10));
             totalDocs += uncommittedOpsOnPrimary;

From aec0c0f9b6b9e511e56c725f68f9010a8298ed02 Mon Sep 17 00:00:00 2001
From: Igor Motov
Date: Sun, 14 Jan 2018 19:20:32 -0500
Subject: [PATCH 09/30] Update version of TaskInfo header serialization after
 backport

Update the serialization version after backporting #27764 to 6.x.
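Patches 06 and 07 above show the invariant behind these version bumps: the
read and write gates for a serialized field must mirror each other exactly,
or sender and receiver in a mixed-version cluster disagree on the wire
format. A minimal sketch of the pattern (the flag field is hypothetical):

    // Writer: only send the field to nodes that know how to read it.
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
            out.writeBoolean(flag);
        }
    }

    // Reader: the gate must match writeTo's exactly; older senders that never
    // wrote the field get a safe default.
    public void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
            flag = in.readBoolean();
        } else {
            flag = true; // default that preserves the pre-flag behaviour
        }
    }

Once a change is backported to an earlier branch, both gates move down
together, which is what the TaskInfo diff below does for the headers field.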
---
 server/src/main/java/org/elasticsearch/tasks/TaskInfo.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
index 2bd16a9addf6a..19e9baedd753b 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
@@ -97,7 +97,7 @@ public TaskInfo(StreamInput in) throws IOException {
         runningTimeNanos = in.readLong();
         cancellable = in.readBoolean();
         parentTaskId = TaskId.readFromStream(in);
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
            headers = in.readMap(StreamInput::readString, StreamInput::readString);
         } else {
             headers = Collections.emptyMap();
@@ -115,7 +115,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(runningTimeNanos);
         out.writeBoolean(cancellable);
         parentTaskId.writeTo(out);
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
             out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
         }
     }

From 023d08ee919b6508ec19b0eb10f15001b1e8a0b1 Mon Sep 17 00:00:00 2001
From: Colin Goodheart-Smithe
Date: Mon, 15 Jan 2018 08:44:49 +0000
Subject: [PATCH 10/30] Adds metadata to rewritten aggregations (#28185)

* Adds metadata to rewritten aggregations

Previous to this change, if any filters in the filters aggregation were
rewritten, the rewritten version of the FiltersAggregationBuilder would not
contain the metadata from the original. This is because
`AbstractAggregationBuilder.getMetaData()` returns an empty map when no
metadata is set.

Closes #28170

* Always set metadata when rewritten
---
 .../search/aggregations/AggregationBuilder.java   | 4 +---
 .../search/aggregations/FiltersAggsRewriteIT.java | 6 ++++++
 .../search/aggregations/bucket/FiltersTests.java  | 2 ++
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
index 99bf9be683ee3..80d8277f4cab2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
@@ -101,9 +101,7 @@ public final AggregationBuilder rewrite(QueryRewriteContext context) throws IOEx
         if (rewritten == this) {
             return rewritten;
         }
-        if (getMetaData() != null && rewritten.getMetaData() == null) {
-            rewritten.setMetaData(getMetaData());
-        }
+        rewritten.setMetaData(getMetaData());
         AggregatorFactories.Builder rewrittenSubAggs = factoriesBuilder.rewrite(context);
         rewritten.subAggregations(rewrittenSubAggs);
         return rewritten;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java
index bb4c3a2a5eb0f..ce5e4a694f279 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java
@@ -32,6 +32,8 @@
 import org.elasticsearch.test.ESSingleNodeTestCase;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 public class FiltersAggsRewriteIT extends ESSingleNodeTestCase {
 
@@ -58,10 +60,14 @@ public void testWrapperQueryIsRewritten() throws
IOException { } FiltersAggregationBuilder builder = new FiltersAggregationBuilder("titles", new FiltersAggregator.KeyedFilter("titleterms", new WrapperQueryBuilder(bytesReference))); + Map metadata = new HashMap<>(); + metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + builder.setMetaData(metadata); SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get(); assertEquals(3, searchResponse.getHits().getTotalHits()); InternalFilters filters = searchResponse.getAggregations().get("titles"); assertEquals(1, filters.getBuckets().size()); assertEquals(2, filters.getBuckets().get(0).getDocCount()); + assertEquals(metadata, filters.getMetaData()); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 7e63bbb6f3855..e0cd490134f14 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; import java.io.IOException; +import java.util.Collections; import static org.hamcrest.Matchers.instanceOf; @@ -123,6 +124,7 @@ public void testOtherBucket() throws IOException { public void testRewrite() throws IOException { // test non-keyed filter that doesn't rewrite AggregationBuilder original = new FiltersAggregationBuilder("my-agg", new MatchAllQueryBuilder()); + original.setMetaData(Collections.singletonMap(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20))); AggregationBuilder rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertSame(original, rewritten); From 784eba86b220452da6d356296ecb15366c388307 Mon Sep 17 00:00:00 2001 From: hanbj Date: Mon, 15 Jan 2018 22:09:27 +0800 Subject: [PATCH 11/30] [Docs] Fix an error in painless-types.asciidoc (#28221) --- docs/painless/painless-types.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index 36cf78312ea26..9e5077503b4a8 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -311,7 +311,7 @@ to floating point types. | int | explicit | explicit | explicit | | implicit | implicit | implicit | long | explicit | explicit | explicit | explicit | | implicit | implicit | float | explicit | explicit | explicit | explicit | explicit | | implicit -| float | explicit | explicit | explicit | explicit | explicit | explicit | +| double | explicit | explicit | explicit | explicit | explicit | explicit | |==== @@ -376,7 +376,7 @@ cast would normally be required between the non-def types. 
def x; // Declare def variable x and set it to null x = 3; // Set the def variable x to the literal 3 with an implicit // cast from int to def -double a = x; // Declare double variable y and set it to def variable x, +double a = x; // Declare double variable a and set it to def variable x, // which contains a double int b = x; // ERROR: Results in a run-time error because an explicit cast is // required to cast from a double to an int From be012b132605ab0052d4ab1d7eb736d64b84a2e5 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 16:47:46 +0100 Subject: [PATCH 12/30] upgrade to lucene 7.2.1 (#28218) --- buildSrc/version.properties | 2 +- docs/Versions.asciidoc | 4 ++-- .../licenses/lucene-expressions-7.2.0.jar.sha1 | 1 - .../licenses/lucene-expressions-7.2.1.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 | 1 + server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 | 1 - server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 | 1 + server/licenses/lucene-core-7.2.0.jar.sha1 | 1 - server/licenses/lucene-core-7.2.1.jar.sha1 | 1 + server/licenses/lucene-grouping-7.2.0.jar.sha1 | 1 - server/licenses/lucene-grouping-7.2.1.jar.sha1 | 1 + server/licenses/lucene-highlighter-7.2.0.jar.sha1 | 1 - server/licenses/lucene-highlighter-7.2.1.jar.sha1 | 1 + server/licenses/lucene-join-7.2.0.jar.sha1 | 1 - server/licenses/lucene-join-7.2.1.jar.sha1 | 1 + server/licenses/lucene-memory-7.2.0.jar.sha1 | 1 - server/licenses/lucene-memory-7.2.1.jar.sha1 | 1 + server/licenses/lucene-misc-7.2.0.jar.sha1 | 1 - server/licenses/lucene-misc-7.2.1.jar.sha1 | 1 + server/licenses/lucene-queries-7.2.0.jar.sha1 | 1 - server/licenses/lucene-queries-7.2.1.jar.sha1 | 1 + server/licenses/lucene-queryparser-7.2.0.jar.sha1 | 1 - server/licenses/lucene-queryparser-7.2.1.jar.sha1 | 1 + server/licenses/lucene-sandbox-7.2.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial3d-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial3d-7.2.1.jar.sha1 | 1 + server/licenses/lucene-suggest-7.2.0.jar.sha1 | 1 - server/licenses/lucene-suggest-7.2.1.jar.sha1 | 1 + server/src/main/java/org/elasticsearch/Version.java | 2 +- 47 files changed, 26 insertions(+), 26 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 create mode 100644 
plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-core-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-join-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.2.1.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3c06aecb0fa09..fabcadabd9f96 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.2.0 +lucene = 7.2.1 # optional dependencies spatial4j = 0.6 diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index ae588350b9c8a..3008b1bb3e09a 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 
7.2.0 -:lucene_version_path: 7_2_0 +:lucene_version: 7.2.1 +:lucene_version_path: 7_2_1 :branch: master :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 deleted file mode 100644 index 0e903acab596e..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -848eda48b43c30a7c7e38fa50182a7e866460e95 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..a57efa8c26aa6 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 @@ -0,0 +1 @@ +51fbb33cdb17bb36a0e86485685bba18eb1c2ccf \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 deleted file mode 100644 index 8c744b138d9b4..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -726e5cf3515ba765f5f326cdced8abaaa64da875 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..fb8e4b0167bf5 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 @@ -0,0 +1 @@ +cfdfcd54c052cdd08140c7cd4daa7929b9657da0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 deleted file mode 100644 index 72de0db978a26..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -879c63f60c20d9f0f2a106062ad2512158007108 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..f8c67b9480380 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 @@ -0,0 +1 @@ +21418892a16434ecb4f8efdbf4e62838f58a6a59 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 deleted file mode 100644 index fe98e5ed6ba59..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdf0ae30f09641d2c0b098c3b7a340d59a7ab4b1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..2443de6a49b0a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 @@ -0,0 +1 @@ +970e860a6e252e7c1dc117c45176a847ce961ffc \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 deleted file mode 100644 index e019470764969..0000000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575096198d49aad52d2e12eb4d43dd547747dd7d \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..1c301d32445ec --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 @@ -0,0 +1 @@ +ec08375a8392720cc378995d8234cd6138a735f6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 deleted file mode 100644 index 83c0a09eed763..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0f748e15d3b6b8abbe654ba48ca7cbbebcfb98a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..4833879967b8e --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 @@ -0,0 +1 @@ +58305876f7fb0fbfad288910378cf4770da43892 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 deleted file mode 100644 index b7453ece71681..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -547938ebce6a7ea4308c4753e28c39d09e4c7423 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..dc33291c7a3cb --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 @@ -0,0 +1 @@ +51cf40e2606863840e52d7e8981314a5a0323e06 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 deleted file mode 100644 index 2ca17a5b5c1ab..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e1b4638fb8b07befc8175880641f821af3e655a \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..5ffdd6b7ba4cf --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 @@ -0,0 +1 @@ +324c3a090a04136720f4ef612db03b5c14866efa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 deleted file mode 100644 index f53f41fd9f865..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -35f5a26abb7fd466749fea7edfedae7897192e95 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..b166b97dd7c4d --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 @@ -0,0 +1 @@ +bc8dc9cc1555543532953d1dff33b67f849e19f9 \ No newline at end of 
file diff --git a/server/licenses/lucene-core-7.2.0.jar.sha1 b/server/licenses/lucene-core-7.2.0.jar.sha1 deleted file mode 100644 index 41e1103ca2570..0000000000000 --- a/server/licenses/lucene-core-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f88107aa577ce8edc0a5cee036b485943107a552 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.1.jar.sha1 b/server/licenses/lucene-core-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e2fd2d7533737 --- /dev/null +++ b/server/licenses/lucene-core-7.2.1.jar.sha1 @@ -0,0 +1 @@ +91897dbbbbada95ccddbd90505f0a0ba6bf7c199 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.0.jar.sha1 b/server/licenses/lucene-grouping-7.2.0.jar.sha1 deleted file mode 100644 index 034534ffef35a..0000000000000 --- a/server/licenses/lucene-grouping-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1536a1a0fd24d0a8c03cfd45d00a52a88f9f52d1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.1.jar.sha1 b/server/licenses/lucene-grouping-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..7537cd21bf326 --- /dev/null +++ b/server/licenses/lucene-grouping-7.2.1.jar.sha1 @@ -0,0 +1 @@ +5dbae570b1a4e54cd978fe5c3ed2d6b2f87be968 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.0.jar.sha1 b/server/licenses/lucene-highlighter-7.2.0.jar.sha1 deleted file mode 100644 index f13d7cc8489bf..0000000000000 --- a/server/licenses/lucene-highlighter-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afd4093723520b0cdb59852018b545efeefd544a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..38837afb0a623 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 @@ -0,0 +1 @@ +2f4b8c93563409cfebb36d910c4dab4910678689 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.0.jar.sha1 b/server/licenses/lucene-join-7.2.0.jar.sha1 deleted file mode 100644 index 8cc521e31a007..0000000000000 --- a/server/licenses/lucene-join-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16029d54fa9c99b3187b68791b182a1ea4f78e89 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.1.jar.sha1 b/server/licenses/lucene-join-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..c2944aa323e2f --- /dev/null +++ b/server/licenses/lucene-join-7.2.1.jar.sha1 @@ -0,0 +1 @@ +3121a038d472f51087500dd6da9146a9b0031ae4 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.0.jar.sha1 b/server/licenses/lucene-memory-7.2.0.jar.sha1 deleted file mode 100644 index a267d12bd71ba..0000000000000 --- a/server/licenses/lucene-memory-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -32f26371224c595f625f061d67fc2edd9c8c836b \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.1.jar.sha1 b/server/licenses/lucene-memory-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..543e123b2a733 --- /dev/null +++ b/server/licenses/lucene-memory-7.2.1.jar.sha1 @@ -0,0 +1 @@ +21233b2baeed2aaa5acf8359bf8c4a90cc6bf553 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.0.jar.sha1 b/server/licenses/lucene-misc-7.2.0.jar.sha1 deleted file mode 100644 index d378ea1ae2cc2..0000000000000 --- a/server/licenses/lucene-misc-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1067351bfca1fc72ece5cb4a4f219762b097de36 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.1.jar.sha1 b/server/licenses/lucene-misc-7.2.1.jar.sha1 
new file mode 100644 index 0000000000000..2a9f649d7d527 --- /dev/null +++ b/server/licenses/lucene-misc-7.2.1.jar.sha1 @@ -0,0 +1 @@ +0478fed6c474c95f6c0c678c04297a3df0c1687e \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.0.jar.sha1 b/server/licenses/lucene-queries-7.2.0.jar.sha1 deleted file mode 100644 index 04b1048ee15dc..0000000000000 --- a/server/licenses/lucene-queries-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0b41af59bc2baed0315abb04621d62e500d094a \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.1.jar.sha1 b/server/licenses/lucene-queries-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e0f2d575e8a2a --- /dev/null +++ b/server/licenses/lucene-queries-7.2.1.jar.sha1 @@ -0,0 +1 @@ +02135cf5047409ed1ca6cd098e802b30f9dbd1ff \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.0.jar.sha1 b/server/licenses/lucene-queryparser-7.2.0.jar.sha1 deleted file mode 100644 index bedb4fbd1448b..0000000000000 --- a/server/licenses/lucene-queryparser-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a17128e35e5e924cf28c283415d83c7a8935e58 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..56c5dbfa18678 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 @@ -0,0 +1 @@ +a87d8b14d1c8045f61cb704955706f6681170be3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.2.0.jar.sha1 b/server/licenses/lucene-sandbox-7.2.0.jar.sha1 deleted file mode 100644 index 62704a0258e92..0000000000000 --- a/server/licenses/lucene-sandbox-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fa77169831ec17636357b55bd2c8ca5a97ec7a2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..9445acbdd87d8 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 @@ -0,0 +1 @@ +dc8dd132fd183791dc27591a69974f55b685d0d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.0.jar.sha1 b/server/licenses/lucene-spatial-7.2.0.jar.sha1 deleted file mode 100644 index adcb3b8de7603..0000000000000 --- a/server/licenses/lucene-spatial-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575f7507d526b2692ae461a4df349e90f048ec77 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..8c1b3d01c2339 --- /dev/null +++ b/server/licenses/lucene-spatial-7.2.1.jar.sha1 @@ -0,0 +1 @@ +09c4d96e6ea34292f7cd20c4ff1d16ff31eb7869 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 deleted file mode 100644 index b9c4e84c78eb0..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f6e31d08dc86bb3edeb6ef132f0920941735e15 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..50422956651d3 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 @@ -0,0 +1 @@ +8aff7e8a5547c03d0c4e7e1b58cb30773bb1d7d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 deleted file mode 100644 
index 225d318bcda9d..0000000000000 --- a/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f857630bfafde418e6e3cf748fe8d18f7b771a70 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..85aae1cfdd053 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 @@ -0,0 +1 @@ +8b0db8ff795b31994ebe93779c450d17c612590d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.0.jar.sha1 b/server/licenses/lucene-suggest-7.2.0.jar.sha1 deleted file mode 100644 index f99189e7b9aae..0000000000000 --- a/server/licenses/lucene-suggest-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0409ce8d0d7e1203143b5be41aa6dd31d4c1bcf9 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.1.jar.sha1 b/server/licenses/lucene-suggest-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e46240d1c6287 --- /dev/null +++ b/server/licenses/lucene-suggest-7.2.1.jar.sha1 @@ -0,0 +1 @@ +1c3804602e35589c21b0391fa7088ef012751a22 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 09d98b75fe9d2..cfd8485f785f4 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -146,7 +146,7 @@ public class Version implements Comparable { public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); + new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final Version CURRENT = V_7_0_0_alpha1; static { From 5973c2bf31e3e71ad684fc626fc0cd0c2442c546 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 17:27:51 +0100 Subject: [PATCH 13/30] #28218: Update the Lucene version for 6.2.0 after backport --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index cfd8485f785f4..8a4bc0752be3f 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -143,7 +143,7 @@ public class Version implements Comparable { public static final int V_6_1_2_ID = 6010299; public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); + public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); From 3895add2ca11ccb045e1557363682b48331ad8a6 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 15 Jan 2018 09:59:01 -0700 Subject: [PATCH 14/30] Introduce elasticsearch-core jar (#28191) This is related to #27933. It introduces a jar named elasticsearch-core in the lib directory. This commit moves the JarHell class from server to elasticsearch-core. 
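Since JarHell now has to stand on its own in elasticsearch-core, the central check that gives the class its name is worth spelling out: it detects the same class file provided by two different classpath entries. A simplified sketch of the idea (hypothetical names, not the actual implementation):

    import java.nio.file.Path;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.jar.JarFile;

    class JarHellSketch {
        // Record which jar provides each class file; a second, different
        // provider for the same class is "jar hell".
        static void checkForDuplicates(Iterable<Path> jars) throws Exception {
            Map<String, Path> seen = new HashMap<>();
            for (Path jar : jars) {
                try (JarFile jarFile = new JarFile(jar.toFile())) {
                    jarFile.stream()
                           .filter(entry -> entry.getName().endsWith(".class"))
                           .forEach(entry -> {
                               Path previous = seen.put(entry.getName(), jar);
                               if (previous != null && previous.equals(jar) == false) {
                                   throw new IllegalStateException("jar hell! class " + entry.getName()
                                           + " appears in " + previous + " and " + jar);
                               }
                           });
                }
            }
        }
    }

The real implementation is considerably more careful: it parses the classpath itself, handles directories as well as jars, and also validates manifest attributes such as the required JDK version, as the diff below shows.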
Additionally, PathUtils and some of Loggers are moved as JarHell depends on them. --- build.gradle | 1 + client/rest/build.gradle | 1 + client/sniffer/build.gradle | 1 + client/test/build.gradle | 1 + libs/elasticsearch-core/build.gradle | 81 +++++++ .../licenses/log4j-api-2.9.1.jar.sha1 | 1 + .../licenses/log4j-api-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/log4j-api-NOTICE.txt | 5 + .../org/elasticsearch/bootstrap/JarHell.java | 19 +- .../elasticsearch/bootstrap/JavaVersion.java | 1 + .../common/SuppressForbidden.java | 0 .../elasticsearch/common/io/PathUtils.java | 0 .../common/logging/ESLoggerFactory.java | 9 - .../elasticsearch/common/logging/Loggers.java | 69 ++++++ .../common/logging/PrefixLogger.java | 2 +- .../elasticsearch/bootstrap/JarHellTests.java | 31 +-- .../bootstrap/duplicate-classes.jar | Bin .../bootstrap/duplicate-xmlbeans-classes.jar | Bin .../transport/netty4/ESLoggingHandlerIT.java | 5 +- .../logging/EvilLoggerConfigurationTests.java | 2 +- .../common/logging/EvilLoggerTests.java | 6 +- server/build.gradle | 2 + .../org/elasticsearch/action/bulk/Retry.java | 4 +- .../elasticsearch/bootstrap/Bootstrap.java | 11 +- .../common/component/AbstractComponent.java | 4 +- .../common/logging/LogConfigurator.java | 12 +- .../{Loggers.java => ServerLoggers.java} | 61 +----- .../common/settings/ClusterSettings.java | 18 +- .../common/settings/SettingsModule.java | 5 +- .../discovery/DiscoveryModule.java | 4 +- .../elasticsearch/env/NodeEnvironment.java | 8 +- .../index/AbstractIndexComponent.java | 4 +- .../index/CompositeIndexEventListener.java | 4 +- .../elasticsearch/index/IndexSettings.java | 4 +- .../elasticsearch/index/IndexingSlowLog.java | 6 +- .../elasticsearch/index/SearchSlowLog.java | 10 +- ...ElasticsearchConcurrentMergeScheduler.java | 4 +- .../elasticsearch/index/engine/Engine.java | 4 +- .../plain/DocValuesIndexFieldData.java | 2 - .../RandomScoreFunctionBuilder.java | 2 +- .../shard/AbstractIndexShardComponent.java | 4 +- .../index/similarity/SimilarityService.java | 1 - .../org/elasticsearch/index/store/Store.java | 4 +- .../recovery/RecoverySourceHandler.java | 4 +- .../indices/recovery/RecoveryTarget.java | 4 +- .../java/org/elasticsearch/node/Node.java | 4 +- .../bucket/terms/TermsAggregatorFactory.java | 4 +- .../bootstrap/MaxMapCountCheckTests.java | 10 +- .../cluster/allocation/ClusterRerouteIT.java | 9 +- .../metadata/TemplateUpgradeServiceIT.java | 4 +- .../ExpectedShardSizeAllocationTests.java | 1 - .../allocation/FailedNodeRoutingTests.java | 6 - .../allocation/RebalanceAfterActiveTests.java | 1 - .../service/ClusterApplierServiceTests.java | 10 +- .../cluster/service/MasterServiceTests.java | 9 +- .../common/settings/ScopedSettingsTests.java | 8 +- .../gateway/GatewayIndexStateIT.java | 3 - .../index/MergeSchedulerSettingsTests.java | 17 +- .../index/engine/InternalEngineTests.java | 21 +- settings.gradle | 1 + .../index/store/EsBaseDirectoryTestCase.java | 1 - .../org/elasticsearch/test/TestCluster.java | 1 - .../test/engine/MockEngineSupport.java | 1 - .../test/junit/listeners/LoggingListener.java | 5 +- .../elasticsearch/test/rest/yaml/Stash.java | 1 - .../test/store/MockFSIndexStore.java | 4 +- 66 files changed, 507 insertions(+), 237 deletions(-) create mode 100644 libs/elasticsearch-core/build.gradle create mode 100644 libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 create mode 100644 libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt create mode 100644 libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt rename {server => 
libs/elasticsearch-core}/src/main/java/org/elasticsearch/bootstrap/JarHell.java (94%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java (99%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/SuppressForbidden.java (100%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/io/PathUtils.java (100%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java (80%) create mode 100644 libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java (98%) rename {server => libs/elasticsearch-core}/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java (88%) rename {server => libs/elasticsearch-core}/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar (100%) rename {server => libs/elasticsearch-core}/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar (100%) rename server/src/main/java/org/elasticsearch/common/logging/{Loggers.java => ServerLoggers.java} (76%) diff --git a/build.gradle b/build.gradle index f7936d5efed71..bb789b7c18b5c 100644 --- a/build.gradle +++ b/build.gradle @@ -183,6 +183,7 @@ subprojects { "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':server', "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', + "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', "org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 1c7e86f799f61..8e0f179634a27 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -72,6 +72,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false namingConventions { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index bcde806f4df16..03e4a082d274c 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -75,6 +75,7 @@ dependencyLicenses { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false namingConventions { diff --git a/client/test/build.gradle b/client/test/build.gradle index ccc7be81466a4..fd5777cc8df3f 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -49,6 +49,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false // TODO: should we have licenses for our test deps? diff --git a/libs/elasticsearch-core/build.gradle b/libs/elasticsearch-core/build.gradle new file mode 100644 index 0000000000000..4cbee03649bb7 --- /dev/null +++ b/libs/elasticsearch-core/build.gradle @@ -0,0 +1,81 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.optional-base' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +archivesBaseName = 'elasticsearch-core' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + + if (isEclipse == false || project.path == ":libs:elasticsearch-core-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-core' + } + } +} + +forbiddenApisMain { + // elasticsearch-core does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:elasticsearch-core") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // from log4j + 'org/osgi/framework/AdaptPermission', + 'org/osgi/framework/AdminPermission', + 'org/osgi/framework/Bundle', + 'org/osgi/framework/BundleActivator', + 'org/osgi/framework/BundleContext', + 'org/osgi/framework/BundleEvent', + 'org/osgi/framework/SynchronousBundleListener', + 'org/osgi/framework/wiring/BundleWire', + 'org/osgi/framework/wiring/BundleWiring' +] \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..e1a89fadfed95 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 @@ -0,0 +1 @@ +7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java similarity index 94% rename from server/src/main/java/org/elasticsearch/bootstrap/JarHell.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 1959e5e81394b..0e5c9597b7ec8 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -20,7 +20,6 @@ package org.elasticsearch.bootstrap; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.Loggers; @@ -120,7 +119,8 @@ static Set parseClassPath(String classPath) { // } // Instead we just throw an exception, and keep it clean. if (element.isEmpty()) { - throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous version?) classpath='" + classPath + "'"); + throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous" + + " version?) classpath='" + classPath + "'"); } // we should be able to just Paths.get() each element, but unfortunately this is not the // whole story on how classpath parsing works: if you want to know, start at sun.misc.Launcher, @@ -215,21 +215,13 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO } /** inspect manifest for sure incompatibilities */ - static void checkManifest(Manifest manifest, Path jar) { + private static void checkManifest(Manifest manifest, Path jar) { // give a nice error if jar requires a newer java version String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK"); if (targetVersion != null) { checkVersionFormat(targetVersion); checkJavaVersion(jar.toString(), targetVersion); } - - // give a nice error if jar is compiled against different es version - String systemESVersion = Version.CURRENT.toString(); - String targetESVersion = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); - if (targetESVersion != null && targetESVersion.equals(systemESVersion) == false) { - throw new IllegalStateException(jar + " requires Elasticsearch " + targetESVersion - + ", your system: " + systemESVersion); - } } public static void checkVersionFormat(String targetVersion) { @@ -237,7 +229,8 @@ public static void checkVersionFormat(String targetVersion) { throw new IllegalStateException( String.format( Locale.ROOT, - "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s", + "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have " + + "leading zeros but was %s", targetVersion ) ); @@ -263,7 +256,7 @@ public static void checkJavaVersion(String resource, String targetVersion) { } } - static void checkClass(Map 
clazzes, String clazz, Path jarpath) { + private static void checkClass(Map clazzes, String clazz, Path jarpath) { Path previous = clazzes.put(clazz, jarpath); if (previous != null) { if (previous.equals(jarpath)) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java similarity index 99% rename from server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java index 03722e03060a7..f22087c6e7d8d 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java @@ -26,6 +26,7 @@ import java.util.stream.Collectors; public class JavaVersion implements Comparable { + private final List version; public List getVersion() { diff --git a/server/src/main/java/org/elasticsearch/common/SuppressForbidden.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/SuppressForbidden.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java diff --git a/server/src/main/java/org/elasticsearch/common/io/PathUtils.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/io/PathUtils.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java similarity index 80% rename from server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index d8f2ebe9be843..44d7d17b59325 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -19,12 +19,9 @@ package org.elasticsearch.common.logging; -import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.spi.ExtendedLogger; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; /** * Factory to get {@link Logger}s @@ -35,12 +32,6 @@ private ESLoggerFactory() { } - public static final Setting LOG_DEFAULT_LEVEL_SETTING = - new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope); - public static final Setting.AffixSetting LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Property.Dynamic, - Property.NodeScope)); - public static Logger getLogger(String prefix, String name) { return getLogger(prefix, LogManager.getLogger(name)); } diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java new file mode 100644 index 0000000000000..89073bdce54c4 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -0,0 +1,69 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Logger; + +public class Loggers { + + public static final String SPACE = " "; + + public static Logger getLogger(Logger parentLogger, String s) { + assert parentLogger instanceof PrefixLogger; + return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s); + } + + public static Logger getLogger(String s) { + return ESLoggerFactory.getLogger(s); + } + + public static Logger getLogger(Class clazz) { + return ESLoggerFactory.getLogger(clazz); + } + + public static Logger getLogger(Class clazz, String... prefixes) { + return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); + } + + public static Logger getLogger(String name, String... prefixes) { + return ESLoggerFactory.getLogger(formatPrefix(prefixes), name); + } + + private static String formatPrefix(String... prefixes) { + String prefix = null; + if (prefixes != null && prefixes.length > 0) { + StringBuilder sb = new StringBuilder(); + for (String prefixX : prefixes) { + if (prefixX != null) { + if (prefixX.equals(SPACE)) { + sb.append(" "); + } else { + sb.append("[").append(prefixX).append("]"); + } + } + } + if (sb.length() > 0) { + sb.append(" "); + prefix = sb.toString(); + } + } + return prefix; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java index a78330c3e8564..b24e839690366 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java @@ -32,7 +32,7 @@ * A logger that prefixes all messages with a fixed prefix specified during construction. The prefix mechanism uses the marker construct, so * for the prefixes to appear, the logging layout pattern must include the marker in its pattern. 
*/ -class PrefixLogger extends ExtendedLoggerWrapper { +public class PrefixLogger extends ExtendedLoggerWrapper { /* * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker; diff --git a/server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java similarity index 88% rename from server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java rename to libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index 7003ef3d81efe..b3dee0b004584 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.bootstrap; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.ESTestCase; @@ -164,7 +163,8 @@ public void testBadJDKVersionInJar() throws Exception { JarHell.checkJarHell(jars); fail("did not get expected exception"); } catch (IllegalStateException e) { - assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was bogus")); + assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated " + + "by \".\"'s and may have leading zeros but was bogus")); } } @@ -178,33 +178,6 @@ public void testRequiredJDKVersionIsOK() throws Exception { JarHell.checkJarHell(jars); } - /** make sure if a plugin is compiled against the same ES version, it works */ - public void testGoodESVersionInJar() throws Exception { - Path dir = createTempDir(); - Manifest manifest = new Manifest(); - Attributes attributes = manifest.getMainAttributes(); - attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); - attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), Version.CURRENT.toString()); - Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - JarHell.checkJarHell(jars); - } - - /** make sure if a plugin is compiled against a different ES version, it fails */ - public void testBadESVersionInJar() throws Exception { - Path dir = createTempDir(); - Manifest manifest = new Manifest(); - Attributes attributes = manifest.getMainAttributes(); - attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); - attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), "1.0-bogus"); - Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - try { - JarHell.checkJarHell(jars); - fail("did not get expected exception"); - } catch (IllegalStateException e) { - assertTrue(e.getMessage().contains("requires Elasticsearch 1.0-bogus")); - } - } - public void testValidVersions() { String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"}; for (String version : versions) { diff --git a/server/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar b/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar rename to libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar diff --git a/server/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar 
b/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar rename to libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index acd71749e2333..67368cb577a81 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -36,12 +37,12 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + ServerLoggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + ServerLoggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.stop(); super.tearDown(); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index f53c9d3b1f5e7..8dab47bd1ceee 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -138,7 +138,7 @@ public void testHierarchy() throws Exception { assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(Level.DEBUG)); final Level level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); - Loggers.setLevel(ESLoggerFactory.getLogger("x"), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger("x"), level); assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(level)); assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index d4bc754689e68..55e359697eb15 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -285,12 +285,12 @@ public void testFindAppender() throws IOException, UserException { final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender"); - final Appender testLoggerConsoleAppender = Loggers.findAppender(hasConsoleAppender, ConsoleAppender.class); + final Appender testLoggerConsoleAppender = ServerLoggers.findAppender(hasConsoleAppender, ConsoleAppender.class); 
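
The test hunks on either side repeat one bookkeeping pattern: attach a MockLogAppender, exercise the code, then detach so appenders never leak across tests. A rough sketch of that shape, using only calls visible in these hunks (the helper name assertLogged is a hypothetical wrapper, not part of this patch):

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.ServerLoggers;
    import org.elasticsearch.test.MockLogAppender;

    // Attach the appender around the code under test, then always detach it,
    // mirroring the setUp()/tearDown() and try/finally hunks in this patch.
    static void assertLogged(Logger target, MockLogAppender appender, Runnable codeUnderTest) {
        appender.start();
        ServerLoggers.addAppender(target, appender);
        try {
            codeUnderTest.run();                      // expected to emit the log lines
            appender.assertAllExpectationsMatched();  // expectations registered by the caller
        } finally {
            ServerLoggers.removeAppender(target, appender); // never leak appenders across tests
            appender.stop();
        }
    }
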
assertNotNull(testLoggerConsoleAppender); assertThat(testLoggerConsoleAppender.getName(), equalTo("console")); final Logger hasCountingNoOpAppender = ESLoggerFactory.getLogger("has_counting_no_op_appender"); - assertNull(Loggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); - final Appender countingNoOpAppender = Loggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); + assertNull(ServerLoggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); + final Appender countingNoOpAppender = ServerLoggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op")); } diff --git a/server/build.gradle b/server/build.gradle index 20693a30c0cec..4f69c2ee159b5 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -38,6 +38,8 @@ archivesBaseName = 'elasticsearch' dependencies { + compile "org.elasticsearch:elasticsearch-core:${version}" + compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 9985d23b9badb..b173fc074bd82 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -22,7 +22,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -102,7 +102,7 @@ static class RetryHandler implements ActionListener { this.backoff = backoffPolicy.iterator(); this.consumer = consumer; this.listener = listener; - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.scheduler = scheduler; // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood this.startTimestampNanos = System.nanoTime(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 0a3d7f675c234..2f86489bce39f 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.common.settings.KeyStoreWrapper; @@ -300,9 +301,9 @@ static void init( try { if (closeStandardStreams) { final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); if (maybeConsoleAppender != null) { - Loggers.removeAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); } closeSystOut(); } @@ -333,9 +334,9 @@ static void init( } 
catch (NodeValidationException | RuntimeException e) { // disable console logging, so user does not see the exception twice (jvm will show it already) final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); if (foreground && maybeConsoleAppender != null) { - Loggers.removeAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); } Logger logger = Loggers.getLogger(Bootstrap.class); if (INSTANCE.node != null) { @@ -368,7 +369,7 @@ static void init( } // re-enable it if appropriate, so they can see any logging during the shutdown process if (foreground && maybeConsoleAppender != null) { - Loggers.addAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.addAppender(rootLogger, maybeConsoleAppender); } throw e; diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java index 8cb51f2b06b0e..f335a754f3771 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; @@ -34,7 +34,7 @@ public abstract class AbstractComponent { protected final Settings settings; public AbstractComponent(Settings settings) { - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.deprecationLogger = new DeprecationLogger(logger); this.settings = settings; } diff --git a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index b97fc13e73038..b38c3d3bdd78e 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -177,15 +177,15 @@ private static void configureStatusLogger() { * @param settings the settings from which logger levels will be extracted */ private static void configureLoggerLevels(final Settings settings) { - if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { - final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings); - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + if (ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { + final Level level = ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); } - ESLoggerFactory.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) + ServerLoggers.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) // do not set a log level for a logger named level (from the default log setting) - .filter(s -> s.getKey().equals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { + .filter(s -> s.getKey().equals(ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { final Level level = s.get(settings); - 
Loggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level);
+            ServerLoggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level);
         });
     }
 
diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java
similarity index 76%
rename from server/src/main/java/org/elasticsearch/common/logging/Loggers.java
rename to server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java
index 812a0b70f2877..99049c53d1637 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java
+++ b/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java
@@ -27,28 +27,29 @@
 import org.apache.logging.log4j.core.config.Configuration;
 import org.apache.logging.log4j.core.config.Configurator;
 import org.apache.logging.log4j.core.config.LoggerConfig;
-import org.apache.logging.log4j.message.MessageFactory;
-import org.elasticsearch.common.Classes;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.node.Node;
 
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
 import static java.util.Arrays.asList;
-import static javax.security.auth.login.Configuration.getConfiguration;
 import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
 
 /**
  * A set of utilities around Logging.
  */
-public class Loggers {
+public class ServerLoggers {
 
-    public static final String SPACE = " ";
+    public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
+        new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Setting.Property.NodeScope);
+    public static final Setting.AffixSetting<Level> LOG_LEVEL_SETTING =
+        Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic,
+            Setting.Property.NodeScope));
 
     public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
         return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
@@ -64,17 +65,17 @@ public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
     }
 
     public static Logger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
-        return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0]));
+        return getLogger(clazz, settings, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0]));
     }
 
     public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
         final List<String> prefixesList = prefixesList(settings, prefixes);
-        return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
+        return Loggers.getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
     }
 
     public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
         final List<String> prefixesList = prefixesList(settings, prefixes);
-        return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
+        return Loggers.getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
     }
 
@@ -88,48 +89,6 @@ private static List<String> prefixesList(Settings settings, String...
prefixes) {
         return prefixesList;
     }
 
-    public static Logger getLogger(Logger parentLogger, String s) {
-        assert parentLogger instanceof PrefixLogger;
-        return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
-    }
-
-    public static Logger getLogger(String s) {
-        return ESLoggerFactory.getLogger(s);
-    }
-
-    public static Logger getLogger(Class<?> clazz) {
-        return ESLoggerFactory.getLogger(clazz);
-    }
-
-    public static Logger getLogger(Class<?> clazz, String... prefixes) {
-        return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
-    }
-
-    public static Logger getLogger(String name, String... prefixes) {
-        return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
-    }
-
-    private static String formatPrefix(String... prefixes) {
-        String prefix = null;
-        if (prefixes != null && prefixes.length > 0) {
-            StringBuilder sb = new StringBuilder();
-            for (String prefixX : prefixes) {
-                if (prefixX != null) {
-                    if (prefixX.equals(SPACE)) {
-                        sb.append(" ");
-                    } else {
-                        sb.append("[").append(prefixX).append("]");
-                    }
-                }
-            }
-            if (sb.length() > 0) {
-                sb.append(" ");
-                prefix = sb.toString();
-            }
-        }
-        return prefix;
-    }
-
     /**
      * Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null
      * level.
diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index db8dd461dd737..aec14415db3fc 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -46,7 +46,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -111,7 +111,7 @@ public ClusterSettings(Settings nodeSettings, Set<Setting<?>> settingsSet) {
     }
 
     private static final class LoggingSettingUpdater implements SettingUpdater<Settings> {
-        final Predicate<String> loggerPredicate = ESLoggerFactory.LOG_LEVEL_SETTING::match;
+        final Predicate<String> loggerPredicate = ServerLoggers.LOG_LEVEL_SETTING::match;
         private final Settings settings;
 
         LoggingSettingUpdater(Settings settings) {
@@ -129,10 +129,10 @@ public Settings getValue(Settings current, Settings previous) {
             builder.put(current.filter(loggerPredicate));
             for (String key : previous.keySet()) {
                 if (loggerPredicate.test(key) && builder.keys().contains(key) == false) {
-                    if (ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
+                    if (ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
                         builder.putNull(key);
                     } else {
-                        builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString());
+                        builder.put(key, ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString());
                     }
                 }
             }
@@ -150,12 +150,12 @@ public void apply(Settings value, Settings current, Settings previous) {
             if ("_root".equals(component)) {
                 final String rootLevel = value.get(key);
                 if (rootLevel == null) {
-                    Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
+                    ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings));
                 } else {
-                    Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
+                    ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
                 }
             } else {
-                Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
+                ServerLoggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
             }
         }
     }
@@ -379,8 +379,8 @@ public void apply(Settings value, Settings current, Settings previous) {
             ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
             EsExecutors.PROCESSORS_SETTING,
             ThreadContext.DEFAULT_HEADERS_SETTING,
-            ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
-            ESLoggerFactory.LOG_LEVEL_SETTING,
+            ServerLoggers.LOG_DEFAULT_LEVEL_SETTING,
+            ServerLoggers.LOG_LEVEL_SETTING,
             NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
             NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
             OsService.REFRESH_INTERVAL_SETTING,
diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
index 0304b20e992e5..20253f7876880 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
@@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.inject.Binder;
 import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -35,7 +35,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.function.Predicate;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
@@ -58,7 +57,7 @@ public SettingsModule(Settings settings, Setting<?>... additionalSettings) {
     }
 
     public SettingsModule(Settings settings, List<Setting<?>> additionalSettings, List<String> settingsFilter) {
-        logger = Loggers.getLogger(getClass(), settings);
+        logger = ServerLoggers.getLogger(getClass(), settings);
         this.settings = settings;
         for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
             registerSetting(setting);
diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
index 179692cd516c8..b2602e8f2c596 100644
--- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
+++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
@@ -25,7 +25,7 @@ import org.elasticsearch.cluster.service.ClusterApplier;
 import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -109,7 +109,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService
         if (discoverySupplier == null) {
             throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
         }
-        Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType);
+        ServerLoggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType);
         discovery = Objects.requireNonNull(discoverySupplier.get());
     }
 
diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 172e3687e3931..ecf0b31934c26 100644
--- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -38,7 +38,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -182,7 +182,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOException {
             locks = null;
             nodeLockId = -1;
             nodeMetaData = new NodeMetaData(generateNodeId(settings));
-            logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
+            logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
             return;
         }
         final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
@@ -190,7 +190,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOException {
         boolean success = false;
 
         // trace logger to debug issues before the default node name is derived from the node id
-        Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);
+        Logger startupTraceLogger = ServerLoggers.getLogger(getClass(), settings);
 
         try {
             sharedDataPath = environment.sharedDataFile();
@@ -244,7 +244,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOException {
             throw new IllegalStateException(message, lastException);
         }
         this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
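
Call sites such as DiscoveryModule and NodeEnvironment above keep the settings-aware overloads, which now live in ServerLoggers and delegate to the core Loggers. The body of prefixesList(...) is elided in the ServerLoggers hunk, so the following is only a sketch of that delegation under stated assumptions (the node-name prefix is inferred from the Node.addNodeNameIfNeeded call above, not taken from this patch):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.node.Node;

    // Sketch of ServerLoggers' settings-aware entry point: derive a "[node_name]"
    // prefix from Settings, then hand off to the core Loggers moved in this patch.
    public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
        final List<String> prefixesList = new ArrayList<>();
        if (Node.NODE_NAME_SETTING.exists(settings)) {
            prefixesList.add(Node.NODE_NAME_SETTING.get(settings)); // e.g. "node-0"
        }
        prefixesList.addAll(Arrays.asList(prefixes));
        return Loggers.getLogger(clazz, prefixesList.toArray(new String[0]));
    }
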
-        this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
+        this.logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
         this.nodeLockId = nodeLockId;
         this.locks = locks;
diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
index 25acdd06b44a6..ce13c12c8496f 100644
--- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
+++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
@@ -21,7 +21,7 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 
 public abstract class AbstractIndexComponent implements IndexComponent {
 
@@ -33,7 +33,7 @@ public abstract class AbstractIndexComponent implements IndexComponent {
      * Constructs a new index component, with the index name and its settings.
      */
     protected AbstractIndexComponent(IndexSettings indexSettings) {
-        this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
+        this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
         this.deprecationLogger = new DeprecationLogger(logger);
         this.indexSettings = indexSettings;
     }
diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java
index 90d8a205e8b57..e50ddd8e3966c 100644
--- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java
+++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java
@@ -24,7 +24,7 @@ import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
@@ -52,7 +52,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             }
         }
         this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners));
-        this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
+        this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
index 8fc23e79d0557..5baca022a216a 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -22,7 +22,7 @@ import org.apache.lucene.index.MergePolicy;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -374,7 +374,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings
         this.settings =
Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build(); this.index = indexMetaData.getIndex(); version = Version.indexCreated(settings); - logger = Loggers.getLogger(getClass(), settings, index); + logger = ServerLoggers.getLogger(getClass(), settings, index); nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 94c3892ef361e..53d63bf64bb6b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -87,7 +87,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { - this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.indexLogger = ServerLoggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); @@ -117,7 +117,7 @@ private void setMaxSourceCharsToLog(int maxSourceCharsToLog) { private void setLevel(SlowLogLevel level) { this.level = level; - Loggers.setLevel(this.indexLogger, level.name()); + ServerLoggers.setLevel(this.indexLogger, level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index a48e3d7bd72c5..d02d4820fd402 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -81,8 +81,8 @@ public final class SearchSlowLog implements SearchOperationListener { public SearchSlowLog(IndexSettings indexSettings) { - this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); - this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); + this.queryLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); + this.fetchLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); this.queryWarnThreshold = 
indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); @@ -108,8 +108,8 @@ public SearchSlowLog(IndexSettings indexSettings) { private void setLevel(SlowLogLevel level) { this.level = level; - Loggers.setLevel(queryLogger, level.name()); - Loggers.setLevel(fetchLogger, level.name()); + ServerLoggers.setLevel(queryLogger, level.name()); + ServerLoggers.setLevel(fetchLogger, level.name()); } @Override public void onQueryPhase(SearchContext context, long tookInNanos) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index f4876149cac13..871f1f62f41be 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.OneMergeHelper; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; @@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { this.config = indexSettings.getMergeSchedulerConfig(); this.shardId = shardId; this.indexSettings = indexSettings.getSettings(); - this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId); + this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings, shardId); refreshConfig(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index b0e2654e7f2fb..b73bfb78f3cb9 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -51,7 +51,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -130,7 +130,7 @@ protected Engine(EngineConfig engineConfig) { this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); - this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name + this.logger = ServerLoggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 698b289d758be..2384e34732040 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ 
b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -19,10 +19,8 @@ package org.elasticsearch.index.fielddata.plain; -import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.SortedSetDocValues; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexFieldData; diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index d3d9ffa481871..d7ce32d9b7628 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -120,7 +120,7 @@ public Integer getSeed() { /** * Set the field to be used for random number generation. This parameter is compulsory * when a {@link #seed(int) seed} is set and ignored otherwise. Note that documents that - * have the same value for a field will get the same score. + * have the same value for a field will get the same score. */ public RandomScoreFunctionBuilder setField(String field) { this.field = field; diff --git a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java index 0e46a562488d3..1d02c33dd3e1b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.index.IndexSettings; public abstract class AbstractIndexShardComponent implements IndexShardComponent { @@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) { this.shardId = shardId; this.indexSettings = indexSettings; - this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); + this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); this.deprecationLogger = new DeprecationLogger(logger); } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index e1080f2c2ccae..16afb55599d49 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index dab39c26a3c5b..74be98b813238 100644 --- 
a/server/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/server/src/main/java/org/elasticsearch/index/store/Store.java
@@ -58,7 +58,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -159,7 +159,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService
 
     public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException {
         super(shardId, indexSettings);
         final Settings settings = indexSettings.getSettings();
-        this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId));
+        this.directory = new StoreDirectory(directoryService.newDirectory(), ServerLoggers.getLogger("index.store.deletes", settings, shardId));
         this.shardLock = shardLock;
         this.onClose = onClose;
         final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 3ee9b953757c3..5a0ee1cf44d07 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -40,7 +40,7 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -120,7 +120,7 @@ public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget
         this.recoveryTarget = recoveryTarget;
         this.request = request;
         this.shardId = this.request.shardId().id();
-        this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName());
+        this.logger = ServerLoggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName());
         this.chunkSizeInBytes = fileChunkSizeInBytes;
         this.response = new RecoveryResponse();
     }
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index 1bbcb9efa9644..f4c823c0e96a7 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -34,7 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.ServerLoggers;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.util.CancellableThreads;
 import 
org.elasticsearch.common.util.concurrent.AbstractRefCounted; @@ -117,7 +117,7 @@ public RecoveryTarget(final IndexShard indexShard, this.cancellableThreads = new CancellableThreads(); this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 62fc271f99084..630afe4579bd1 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; @@ -143,7 +144,6 @@ import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; -import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.Charset; @@ -267,7 +267,7 @@ protected Node(final Environment environment, Collection throw new IllegalStateException("Failed to create node environment", ex); } final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - Logger logger = Loggers.getLogger(Node.class, tmpSettings); + Logger logger = ServerLoggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); // this must be captured after the node name is possibly added to the settings diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 1027785c57711..a6481b58ca499 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -259,8 +259,8 @@ Aggregator create(String name, final long maxOrd = getMaxOrd(valuesSource, context.searcher()); assert maxOrd != -1; - final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); - + final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); + if (factories == AggregatorFactories.EMPTY && includeExclude == null && Aggregator.descendsFromBucketAggregator(parent) == false && diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index c5b99a91ffa3b..2c51c210b1edc 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLoggerFactory; -import 
org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -83,11 +83,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "I/O exception while trying to read [{}]", new Object[] { procSysVmMaxMapCountPath }, e -> ioException == e)); - Loggers.addAppender(logger, appender); + ServerLoggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - Loggers.removeAppender(logger, appender); + ServerLoggers.removeAppender(logger, appender); appender.stop(); } @@ -105,11 +105,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "unable to parse vm.max_map_count [{}]", new Object[] { "eof" }, e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\""))); - Loggers.addAppender(logger, appender); + ServerLoggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - Loggers.removeAppender(logger, appender); + ServerLoggers.removeAppender(logger, appender); appender.stop(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 0522f3f15f817..b8050d728a6b3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -342,7 +343,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no completed message logged on dry run", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*") ); - Loggers.addAppender(actionLogger, dryRunMockLog); + ServerLoggers.addAppender(actionLogger, dryRunMockLog); AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); ClusterRerouteResponse dryRunResponse = client().admin().cluster().prepareReroute() @@ -357,7 +358,7 @@ public void testMessageLogging() throws Exception{ dryRunMockLog.assertAllExpectationsMatched(); dryRunMockLog.stop(); - Loggers.removeAppender(actionLogger, dryRunMockLog); + ServerLoggers.removeAppender(actionLogger, dryRunMockLog); MockLogAppender allocateMockLog = new MockLogAppender(); allocateMockLog.start(); @@ -369,7 +370,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no message for second allocate empty primary", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*" + nodeName2 + "*") ); - Loggers.addAppender(actionLogger, allocateMockLog); + ServerLoggers.addAppender(actionLogger, allocateMockLog); AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); AllocationCommand noDecisionAllocation = new 
AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); @@ -385,7 +386,7 @@ public void testMessageLogging() throws Exception{ allocateMockLog.assertAllExpectationsMatched(); allocateMockLog.stop(); - Loggers.removeAppender(actionLogger, allocateMockLog); + ServerLoggers.removeAppender(actionLogger, allocateMockLog); } public void testClusterRerouteWithBlocks() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index c8d5cdc6c86db..be03fbe1cd640 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -63,7 +63,7 @@ public static class TestPlugin extends Plugin { protected final Settings settings; public TestPlugin(Settings settings) { - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.settings = settings; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 1ed5a3ac7ed90..8ebe627751ce4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b551e912947a..4b941a6ce4a7f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; @@ -41,24 +40,19 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.indices.cluster.AbstractIndicesClusterStateServiceTestCase; import org.elasticsearch.indices.cluster.ClusterStateChanges; -import org.elasticsearch.indices.cluster.IndicesClusterStateService; import 
org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index f6ab967a10b46..1406e4d6d6121 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 34750180ff185..c104df913b205 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -130,7 +130,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*failed to execute cluster state applier in [2s]*")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(3); clusterApplierService.currentTimeOverride = System.nanoTime(); @@ -180,7 +180,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -210,7 +210,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state applier task [test3] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - Loggers.addAppender(clusterLogger, 
mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -276,7 +276,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 1b747f2268747..3b999b5f7733a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -231,7 +232,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); masterService.currentTimeOverride = System.nanoTime(); @@ -306,7 +307,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -578,7 +579,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state update task [test4] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(5); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -674,7 +675,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2015a6b42d16f..29c7a2b161403 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import 
org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; @@ -751,8 +751,8 @@ public void testLoggingUpdates() { settings.applySettings(Settings.builder().build()); assertEquals(property, ESLoggerFactory.getLogger("test").getLevel()); } finally { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); - Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); } } @@ -767,7 +767,7 @@ public void testFallbackToLoggerLevel() { settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default. assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel()); } finally { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 880f8dcba5de5..aeadcf30e3678 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -52,8 +51,6 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.List; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; diff --git a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java index e9eb5d8b83d2e..301d4e3cfa360 100644 --- a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -71,8 +72,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - Loggers.addAppender(settingsLogger, mockAppender); - Loggers.setLevel(settingsLogger, Level.TRACE); + ServerLoggers.addAppender(settingsLogger, mockAppender); + ServerLoggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -91,9 +92,9 @@ public void testUpdateAutoThrottleSettings() throws Exception { assertTrue(mockAppender.sawUpdateAutoThrottle); 
assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), false); } finally { - Loggers.removeAppender(settingsLogger, mockAppender); + ServerLoggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(settingsLogger, (Level) null); + ServerLoggers.setLevel(settingsLogger, (Level) null); } } @@ -102,8 +103,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - Loggers.addAppender(settingsLogger, mockAppender); - Loggers.setLevel(settingsLogger, Level.TRACE); + ServerLoggers.addAppender(settingsLogger, mockAppender); + ServerLoggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -123,9 +124,9 @@ public void testUpdateMergeMaxThreadCount() throws Exception { // Make sure we log the change: assertTrue(mockAppender.sawUpdateMaxThreadCount); } finally { - Loggers.removeAppender(settingsLogger, mockAppender); + ServerLoggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(settingsLogger, (Level) null); + ServerLoggers.setLevel(settingsLogger, (Level) null); } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index a508d691ed3a6..518411e59e8cd 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -78,6 +78,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -1924,8 +1925,8 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - Loggers.addAppender(rootLogger, mockAppender); - Loggers.setLevel(rootLogger, Level.DEBUG); + ServerLoggers.addAppender(rootLogger, mockAppender); + ServerLoggers.setLevel(rootLogger, Level.DEBUG); rootLogger = LogManager.getRootLogger(); try { @@ -1936,15 +1937,15 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti assertFalse(mockAppender.sawIndexWriterMessage); // Again, with TRACE, which should log IndexWriter output: - Loggers.setLevel(rootLogger, Level.TRACE); + ServerLoggers.setLevel(rootLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertTrue(mockAppender.sawIndexWriterMessage); } finally { - Loggers.removeAppender(rootLogger, mockAppender); + ServerLoggers.removeAppender(rootLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(rootLogger, savedLevel); + ServerLoggers.setLevel(rootLogger, savedLevel); } } @@ -2214,8 +2215,8 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD"); - Loggers.addAppender(iwIFDLogger, mockAppender); - Loggers.setLevel(iwIFDLogger, Level.DEBUG); + 
ServerLoggers.addAppender(iwIFDLogger, mockAppender); + ServerLoggers.setLevel(iwIFDLogger, Level.DEBUG); try { // First, with DEBUG, which should NOT log IndexWriter output: @@ -2226,16 +2227,16 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce assertFalse(mockAppender.sawIndexWriterIFDMessage); // Again, with TRACE, which should only log IndexWriter IFD output: - Loggers.setLevel(iwIFDLogger, Level.TRACE); + ServerLoggers.setLevel(iwIFDLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertTrue(mockAppender.sawIndexWriterIFDMessage); } finally { - Loggers.removeAppender(iwIFDLogger, mockAppender); + ServerLoggers.removeAppender(iwIFDLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(iwIFDLogger, (Level) null); + ServerLoggers.setLevel(iwIFDLogger, (Level) null); } } diff --git a/settings.gradle b/settings.gradle index 40034a8542448..b844af52df76b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -28,6 +28,7 @@ List projects = [ 'test:fixtures:krb5kdc-fixture', 'test:fixtures:old-elasticsearch', 'test:logger-usage', + 'libs:elasticsearch-core', 'libs:elasticsearch-nio', 'modules:aggs-matrix-stats', 'modules:analysis-common', diff --git a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java index 69dfae2c6788c..c078e88da20ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java @@ -27,7 +27,6 @@ import org.apache.lucene.util.TimeUnits; import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 470847e65f25f..a11b70bfa104e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -35,7 +35,6 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.List; import java.util.Random; import java.util.Set; diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index 144b2be1b0235..f30c498b21020 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.SearcherManager; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.Loggers; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index e021df52c60fe..60cc6ceeccfa7 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.runner.Description; @@ -106,7 +107,7 @@ private Map processTestLogging(final TestLogging testLogging) { } for (final Map.Entry entry : map.entrySet()) { final Logger logger = resolveLogger(entry.getKey()); - Loggers.setLevel(logger, entry.getValue()); + ServerLoggers.setLevel(logger, entry.getValue()); } return existing; } @@ -145,7 +146,7 @@ private static Map getLoggersAndLevelsFromAnnotation(final TestL private Map reset(final Map map) { for (final Map.Entry previousLogger : map.entrySet()) { final Logger logger = resolveLogger(previousLogger.getKey()); - Loggers.setLevel(logger, previousLogger.getValue()); + ServerLoggers.setLevel(logger, previousLogger.getValue()); } return Collections.emptyMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java index e2eefc6376ad1..c7b8e0fef2f9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1efd210b110c8..858a8ebd5ed0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -95,7 +95,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } From 190f1e1fb317a9f9e1e1d11e9df60c0aeb7e267c Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:00:20 +0100 Subject: [PATCH 15/30] Fix synonym phrase query expansion for cross_fields parsing (#28045) * Fix synonym phrase query expansion for cross_fields parsing The `cross_fields` mode of the query parser ignores phrase queries generated by multi-word synonyms. 
In such cases, only the first field of each analyzer group is kept. This change fixes the issue by expanding the phrase query for each analyzer group to **all** fields using a disjunction max query. --- .../index/search/MatchQuery.java | 17 ++++++- .../index/search/MultiMatchQuery.java | 47 +++++++++++++++++- .../index/search/MultiMatchQueryTests.java | 49 +++++++++++++++++++ 3 files changed, 110 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index f37b1d6f47012..d6a0bf5f73802 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.MultiTermQuery; @@ -350,7 +351,12 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws throw exc; } } - return super.analyzePhrase(field, stream, slop); + Query query = super.analyzePhrase(field, stream, slop); + if (query instanceof PhraseQuery) { + // synonyms that expand to multiple terms can return a phrase query. + return blendPhraseQuery((PhraseQuery) query, mapper); + } + return query; } /** @@ -472,6 +478,14 @@ private Query boolToExtendedCommonTermsQuery(BooleanQuery bq, Occur highFreqOccu } } + /** + * Called when a phrase query is built with {@link QueryBuilder#analyzePhrase(String, TokenStream, int)}. + * Subclasses can override this function to blend the query across multiple fields. 
+ */ + protected Query blendPhraseQuery(PhraseQuery query, MappedFieldType fieldType) { + return query; + } + protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { return new SynonymQuery(terms); } @@ -494,5 +508,4 @@ protected Query blendTermQuery(Term term, MappedFieldType fieldType) { } return termQuery(fieldType, term.bytes(), lenient); } - } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 61029f70e8f19..8a85c67b6815f 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -25,10 +25,10 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -143,6 +143,10 @@ public Query blendTerms(Term[] terms, MappedFieldType fieldType) { public Query termQuery(MappedFieldType fieldType, BytesRef value) { return MultiMatchQuery.this.termQuery(fieldType, value, lenient); } + + public Query blendPhrase(PhraseQuery query, MappedFieldType type) { + return MultiMatchQuery.super.blendPhraseQuery(query, type); + } } final class CrossFieldsQueryBuilder extends QueryBuilder { @@ -226,6 +230,17 @@ public Query termQuery(MappedFieldType fieldType, BytesRef value) { */ return blendTerm(new Term(fieldType.name(), value.utf8ToString()), fieldType); } + + @Override + public Query blendPhrase(PhraseQuery query, MappedFieldType type) { + if (blendedFields == null) { + return super.blendPhrase(query, type); + } + /** + * We build phrase queries for multi-word synonyms when {@link QueryBuilder#autoGenerateSynonymsPhraseQuery} is true. + */ + return MultiMatchQuery.blendPhrase(query, blendedFields); + } } static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker, @@ -288,6 +303,28 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm } } + /** + * Expand a {@link PhraseQuery} to multiple fields that share the same analyzer. + * Returns a {@link DisjunctionMaxQuery} with a disjunction for each expanded field. + */ + static Query blendPhrase(PhraseQuery query, FieldAndFieldType... 
fields) { + List disjunctions = new ArrayList<>(); + for (FieldAndFieldType field : fields) { + int[] positions = query.getPositions(); + Term[] terms = query.getTerms(); + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + for (int i = 0; i < terms.length; i++) { + builder.add(new Term(field.fieldType.name(), terms[i].bytes()), positions[i]); + } + Query q = builder.build(); + if (field.boost != AbstractQueryBuilder.DEFAULT_BOOST) { + q = new BoostQuery(q, field.boost); + } + disjunctions.add(q); + } + return new DisjunctionMaxQuery(disjunctions, 0.0f); + } + @Override protected Query blendTermQuery(Term term, MappedFieldType fieldType) { if (queryBuilder == null) { @@ -304,6 +341,14 @@ protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { return queryBuilder.blendTerms(terms, fieldType); } + @Override + protected Query blendPhraseQuery(PhraseQuery query, MappedFieldType fieldType) { + if (queryBuilder == null) { + return super.blendPhraseQuery(query, fieldType); + } + return queryBuilder.blendPhrase(query, fieldType); + } + static final class FieldAndFieldType { final MappedFieldType fieldType; final float boost; diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 5695094553de9..1f033b5fb4187 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -19,12 +19,16 @@ package org.elasticsearch.index.search; +import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -43,7 +47,11 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.hamcrest.Matchers.equalTo; @@ -220,4 +228,45 @@ public void testMultiMatchCrossFieldsWithSynonyms() throws IOException { assertThat(parsedQuery, equalTo(expectedQuery)); } + + public void testMultiMatchCrossFieldsWithSynonymsPhrase() throws IOException { + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null); + MultiMatchQuery parser = new MultiMatchQuery(queryShardContext); + parser.setAnalyzer(new MockSynonymAnalyzer()); + Map fieldNames = new HashMap<>(); + fieldNames.put("name.first", 1.0f); + fieldNames.put("name.last", 1.0f); + Query query = parser.parse(MultiMatchQueryBuilder.Type.CROSS_FIELDS, fieldNames, "guinea pig", null); + + Term[] terms = new Term[2]; + terms[0] = new Term("name.first", "cavy"); + terms[1] = new Term("name.last", "cavy"); + float[] boosts = new float[2]; + Arrays.fill(boosts, 1.0f); + + List phraseDisjuncts = new ArrayList<>(); + phraseDisjuncts.add( + new PhraseQuery.Builder() + .add(new 
Term("name.first", "guinea")) + .add(new Term("name.first", "pig")) + .build() + ); + phraseDisjuncts.add( + new PhraseQuery.Builder() + .add(new Term("name.last", "guinea")) + .add(new Term("name.last", "pig")) + .build() + ); + BooleanQuery expected = new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery(phraseDisjuncts, 0.0f), BooleanClause.Occur.SHOULD) + .add(BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD + ) + .build(); + assertEquals(expected, query); + } } From ee7eac8dc19f2a5f77318040a9eb96c3d0a3d257 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 15 Jan 2018 10:20:30 -0700 Subject: [PATCH 16/30] `MockTcpTransport` to connect asynchronously (#28203) The method `initiateChannel` on `TcpTransport` is explicit in that channels can be connect asynchronously. All production implementations do connect asynchronously. Only the blocking `MockTcpTransport` connects in a synchronous manner. This avoids testing some of the blocking code in `TcpTransport` that waits on connections to complete. Additionally, it requires a more extensive method signature than required for other transports. This commit modifies the `MockTcpTransport` to make these connections asynchronously on a different thread. Additionally, it simplifies that `initiateChannel` method signature. --- .../transport/netty4/Netty4Transport.java | 8 +-- .../transport/nio/NioTransport.java | 7 +-- .../elasticsearch/transport/TcpTransport.java | 13 ++-- .../transport/TcpTransportTests.java | 6 +- .../transport/MockTcpTransport.java | 61 +++++++++++-------- .../transport/nio/MockNioTransport.java | 7 +-- 6 files changed, 46 insertions(+), 56 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 67b1607aa8a3a..f4818a2e56752 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -40,7 +40,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -51,12 +50,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportRequestOptions; @@ -239,9 +236,8 @@ protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) } @Override - protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener listener) - throws IOException { - ChannelFuture channelFuture = bootstrap.connect(node.getAddress().address()); + protected NettyTcpChannel 
initiateChannel(InetSocketAddress address, ActionListener listener) throws IOException { + ChannelFuture channelFuture = bootstrap.connect(address); Channel channel = channelFuture.channel(); if (channel == null) { Netty4Utils.maybeDie(channelFuture.cause()); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 42063878b4b2f..9917bf79f593b 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -21,14 +21,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -93,9 +91,8 @@ protected TcpNioServerSocketChannel bind(String name, InetSocketAddress address) } @Override - protected TcpNioSocketChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { - TcpNioSocketChannel channel = nioGroup.openChannel(node.getAddress().address(), clientChannelFactory); + protected TcpNioSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + TcpNioSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 54bfcaa6027d7..727ce2f157b31 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -604,7 +604,7 @@ public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile c try { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); connectionFutures.add(connectFuture); - TcpChannel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture); + TcpChannel channel = initiateChannel(node.getAddress().address(), connectFuture); logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); channels.add(channel); } catch (Exception e) { @@ -1057,17 +1057,14 @@ protected void serverAcceptedChannel(TcpChannel channel) { protected abstract TcpChannel bind(String name, InetSocketAddress address) throws IOException; /** - * Initiate a single tcp socket channel to a node. Implementations do not have to observe the connectTimeout. - * It is provided for synchronous connection implementations. + * Initiate a single tcp socket channel. 
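+ * Implementations are free to establish the connection asynchronously; the given listener is notified once the connection completes or fails.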
* - * @param node the node - * @param connectTimeout the connection timeout - * @param connectListener listener to be called when connection complete + * @param address address for the initiated connection + * @param connectListener listener to be called when connection complete * @return the pending connection * @throws IOException if an I/O exception occurs while opening the channel */ - protected abstract TcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException; + protected abstract TcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException; /** * Called to tear down internal resources diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index f63cd1c7a3e93..2cedb5419e08e 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -41,7 +40,6 @@ import java.io.IOException; import java.io.StreamCorruptedException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -49,7 +47,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsInstanceOf.instanceOf; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; /** Unit tests for {@link TcpTransport} */ public class TcpTransportTests extends ESTestCase { @@ -193,8 +190,7 @@ protected FakeChannel bind(String name, InetSocketAddress address) throws IOExce } @Override - protected FakeChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { + protected FakeChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { return new FakeChannel(messageCaptor); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 91b2a2f79e310..570827bd54ee4 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -21,7 +21,6 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -30,7 +29,6 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CancellableThreads; import 
org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -49,7 +47,6 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.SocketException; -import java.net.SocketTimeoutException; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -61,7 +58,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; /** * This is a socket based blocking TcpTransport implementation that is used for tests @@ -164,28 +160,32 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx } @Override - protected MockChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { - InetSocketAddress address = node.getAddress().address(); + protected MockChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { final MockSocket socket = new MockSocket(); + final MockChannel channel = new MockChannel(socket, address, "none"); + boolean success = false; try { configureSocket(socket); - try { - socket.connect(address, Math.toIntExact(connectTimeout.millis())); - } catch (SocketTimeoutException ex) { - throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", ex); - } - MockChannel channel = new MockChannel(socket, address, "none", (c) -> {}); - channel.loopRead(executor); success = true; - connectListener.onResponse(null); - return channel; } finally { if (success == false) { IOUtils.close(socket); } + } + + executor.submit(() -> { + try { + socket.connect(address); + channel.loopRead(executor); + connectListener.onResponse(null); + } catch (Exception ex) { + connectListener.onFailure(ex); + } + }); + + return channel; } @Override @@ -218,7 +218,6 @@ public final class MockChannel implements Closeable, TcpChannel { private final Socket activeChannel; private final String profile; private final CancellableThreads cancellableThreads = new CancellableThreads(); - private final Closeable onClose; private final CompletableFuture closeFuture = new CompletableFuture<>(); /** @@ -227,14 +226,12 @@ public final class MockChannel implements Closeable, TcpChannel { * @param socket The client socket. Must not be null. * @param localAddress Address associated with the corresponding local server socket. Must not be null. * @param profile The associated profile name. - * @param onClose Callback to execute when this channel is closed. */ - public MockChannel(Socket socket, InetSocketAddress localAddress, String profile, Consumer onClose) { + public MockChannel(Socket socket, InetSocketAddress localAddress, String profile) { this.localAddress = localAddress; this.activeChannel = socket; this.serverSocket = null; this.profile = profile; - this.onClose = () -> onClose.accept(this); synchronized (openChannels) { openChannels.add(this); } @@ -246,12 +243,11 @@ public MockChannel(Socket socket, InetSocketAddress localAddress, String profile * @param serverSocket The associated server socket. Must not be null. * @param profile The associated profile name. 
*/ - public MockChannel(ServerSocket serverSocket, String profile) { + MockChannel(ServerSocket serverSocket, String profile) { this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress(); this.serverSocket = serverSocket; this.profile = profile; this.activeChannel = null; - this.onClose = null; synchronized (openChannels) { openChannels.add(this); } @@ -266,8 +262,19 @@ public void accept(Executor executor) throws IOException { synchronized (this) { if (isOpen.get()) { incomingChannel = new MockChannel(incomingSocket, - new InetSocketAddress(incomingSocket.getLocalAddress(), incomingSocket.getPort()), profile, - workerChannels::remove); + new InetSocketAddress(incomingSocket.getLocalAddress(), incomingSocket.getPort()), profile); + MockChannel finalIncomingChannel = incomingChannel; + incomingChannel.addCloseListener(new ActionListener() { + @Override + public void onResponse(Void aVoid) { + workerChannels.remove(finalIncomingChannel); + } + + @Override + public void onFailure(Exception e) { + workerChannels.remove(finalIncomingChannel); + } + }); serverAcceptedChannel(incomingChannel); //establish a happens-before edge between closing and accepting a new connection workerChannels.add(incomingChannel); @@ -287,7 +294,7 @@ public void accept(Executor executor) throws IOException { } } - public void loopRead(Executor executor) { + void loopRead(Executor executor) { executor.execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -312,7 +319,7 @@ protected void doRun() throws Exception { }); } - public synchronized void close0() throws IOException { + synchronized void close0() throws IOException { // establish a happens-before edge between closing and accepting a new connection // we have to sync this entire block to ensure that our openChannels checks work correctly. 
// The close block below will close all worker channels but if one of the worker channels runs into an exception @@ -325,7 +332,7 @@ public synchronized void close0() throws IOException { removedChannel = openChannels.remove(this); } IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels), - () -> cancellableThreads.cancel("channel closed"), onClose); + () -> cancellableThreads.cancel("channel closed")); assert removedChannel: "Channel was not removed or removed twice?"; } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 5911d10fa2973..a8876453b5b2f 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -21,13 +21,11 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -83,9 +81,8 @@ protected MockServerChannel bind(String name, InetSocketAddress address) throws } @Override - protected MockSocketChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { - MockSocketChannel channel = nioGroup.openChannel(node.getAddress().address(), clientChannelFactory); + protected MockSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + MockSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; } From bd11e6c44193260d1219c6537e19ee4f236f8e1e Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:30:38 +0100 Subject: [PATCH 17/30] Fix NPE on composite aggregation with sub-aggregations that need scores (#28129) The composite aggregation defers the collection of sub-aggregations to a second pass that visits documents only if they appear in the top buckets. However, the scorer for sub-aggregations is not set on this second pass, which triggers an NPE if any sub-aggregation tries to access the score. This change creates a scorer for the second pass and makes sure that sub-aggs can use it safely to check the score of the collected documents. 
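For readers skimming the diff below, here is a condensed, self-contained sketch of the scored-replay idea. It is not the patch itself: the `ScoredReplay` class, its method, and the parameter names are invented for illustration, and it assumes the recorded per-segment doc ids are a subset of the query's matches (which is what the first pass guarantees).

```java
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

/** Hypothetical helper: replays recorded doc ids with a scorer positioned on each doc. */
class ScoredReplay {
    static void replay(IndexSearcher searcher, Query query, LeafReaderContext ctx,
                       DocIdSet recordedDocs, LeafCollector sub) throws IOException {
        DocIdSetIterator docs = recordedDocs.iterator();
        if (docs == null) {
            return; // nothing was recorded for this segment
        }
        // Ask for a scoring weight (needsScores = true); because the recorded docs
        // are a subset of the query's matches, a scorer exists for this segment.
        Weight weight = searcher.createNormalizedWeight(query, true);
        Scorer scorer = weight.scorer(ctx);
        sub.setScorer(scorer); // without this, collectors that call score() hit an NPE
        DocIdSetIterator scorerIt = scorer.iterator();
        for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) {
            scorerIt.advance(doc); // keep the scorer in lockstep with the replayed doc
            sub.collect(doc);      // sub-aggregations may now safely read scorer.score()
        }
    }
}
```

Advancing the scorer's iterator alongside the replayed doc ids, rather than re-evaluating the query per document, keeps the replay linear in the number of recorded documents; the assertions in the actual patch check exactly this lockstep invariant.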
--- .../bucket/composite/CompositeAggregator.java | 23 ++++++ .../composite/CompositeAggregatorTests.java | 73 ++++++++++++++++++- .../aggregations/AggregatorTestCase.java | 39 +++++++--- 3 files changed, 123 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 9612ba2f895bc..3467aaf318baf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -23,6 +23,9 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -87,6 +90,12 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException // Replay all documents that contain at least one top bucket (collected during the first pass). grow(keys.size()+1); + final boolean needsScores = needsScores(); + Weight weight = null; + if (needsScores) { + Query query = context.query(); + weight = context.searcher().createNormalizedWeight(query, true); + } for (LeafContext context : contexts) { DocIdSetIterator docIdSetIterator = context.docIdSet.iterator(); if (docIdSetIterator == null) { @@ -95,7 +104,21 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException final CompositeValuesSource.Collector collector = array.getLeafCollector(context.ctx, getSecondPassCollector(context.subCollector)); int docID; + DocIdSetIterator scorerIt = null; + if (needsScores) { + Scorer scorer = weight.scorer(context.ctx); + // We don't need to check if the scorer is null + // since we are sure that there are documents to replay (docIdSetIterator is not empty). 
+ scorerIt = scorer.iterator(); + context.subCollector.setScorer(scorer); + } while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (needsScores) { + assert scorerIt.docID() < docID; + scorerIt.advance(docID); + // aggregations should only be replayed on matching documents + assert scorerIt.docID() == docID; + } collector.collect(docID); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 339f9bda65a0a..172aebbc0e5dc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -50,6 +50,8 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.IndexSettingsModule; import org.joda.time.DateTimeZone; @@ -1065,8 +1067,73 @@ public void testWithKeywordAndDateHistogram() throws IOException { ); } - private void testSearchCase(Query query, - Sort sort, + public void testWithKeywordAndTopHits() throws Exception { + final List>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("keyword", "a"), + createDocument("keyword", "c"), + createDocument("keyword", "a"), + createDocument("keyword", "d"), + createDocument("keyword", "c") + ) + ); + final Sort sort = new Sort(new SortedSetSortField("keyword", false)); + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")); + }, (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + TopHits topHits = result.getBuckets().get(0).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + topHits = result.getBuckets().get(1).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + topHits = result.getBuckets().get(2).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 1); + assertEquals(topHits.getHits().getTotalHits(), 1L);; + } + ); + + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + 
return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .aggregateAfter(Collections.singletonMap("keyword", "a")) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")); + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=c}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + TopHits topHits = result.getBuckets().get(0).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=d}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + topHits = result.getBuckets().get(1).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 1); + assertEquals(topHits.getHits().getTotalHits(), 1L); + } + ); + } + + private void testSearchCase(Query query, Sort sort, List>> dataset, Supplier create, Consumer verify) throws IOException { @@ -1107,7 +1174,7 @@ private void executeTestCase(boolean reduced, IndexSearcher indexSearcher = newSearcher(indexReader, sort == null, sort == null); CompositeAggregationBuilder aggregationBuilder = create.get(); if (sort != null) { - CompositeAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); + CompositeAggregator aggregator = createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); assertTrue(aggregator.canEarlyTerminate()); } final InternalComposite composite; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index f34b1c6e79f69..720d701e64ced 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -103,16 +103,27 @@ protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggreg new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - /** Create a factory for the given aggregation builder. */ + protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + return createAggregatorFactory(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); + } + + /** Create a factory for the given aggregation builder. */ + protected AggregatorFactory createAggregatorFactory(Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + MappedFieldType... 
fieldTypes) throws IOException { SearchContext searchContext = createSearchContext(indexSearcher, indexSettings); CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); when(searchContext.aggregations()) .thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer)); + when(searchContext.query()).thenReturn(query); when(searchContext.bigArrays()).thenReturn(new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService)); // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests: MapperService mapperService = mapperServiceMock(); @@ -146,19 +157,20 @@ protected A createAggregator(AggregationBuilder aggregati new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - protected A createAggregator(AggregationBuilder aggregationBuilder, + protected A createAggregator(Query query, + AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, IndexSettings indexSettings, MappedFieldType... fieldTypes) throws IOException { - return createAggregator(aggregationBuilder, indexSearcher, indexSettings, + return createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - protected A createAggregator(AggregationBuilder aggregationBuilder, + protected A createAggregator(Query query, AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { - return createAggregator(aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes); + return createAggregator(query, aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes); } protected A createAggregator(AggregationBuilder aggregationBuilder, @@ -166,8 +178,17 @@ protected A createAggregator(AggregationBuilder aggregati IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + return createAggregator(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); + } + + protected A createAggregator(Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + MappedFieldType... fieldTypes) throws IOException { @SuppressWarnings("unchecked") - A aggregator = (A) createAggregatorFactory(aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) + A aggregator = (A) createAggregatorFactory(query, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) .create(null, true); return aggregator; } @@ -262,7 +283,7 @@ protected A search(IndexSe int maxBucket, MappedFieldType... 
fieldTypes) throws IOException { MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); - C a = createAggregator(builder, searcher, bucketConsumer, fieldTypes); + C a = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); a.preCollection(); searcher.search(query, a); a.postCollection(); @@ -310,11 +331,11 @@ protected A searchAndReduc Query rewritten = searcher.rewrite(query); Weight weight = searcher.createWeight(rewritten, true, 1f); MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); - C root = createAggregator(builder, searcher, bucketConsumer, fieldTypes); + C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); for (ShardSearcher subSearcher : subSearchers) { MultiBucketConsumer shardBucketConsumer = new MultiBucketConsumer(maxBucket); - C a = createAggregator(builder, subSearcher, shardBucketConsumer, fieldTypes); + C a = createAggregator(query, builder, subSearcher, shardBucketConsumer, fieldTypes); a.preCollection(); subSearcher.search(weight, a); a.postCollection(); From 77a7e2480b6745855d0bbfc6020ce70378b6267e Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:34:10 +0100 Subject: [PATCH 18/30] Allow update of `eager_global_ordinals` on `_parent`. (#28014) A bug introduced in #24407 currently prevents `eager_global_ordinals` from being updated. This new approach should fix the issue while still allowing mapping updates to not specify the `_parent` field if it doesn't need updating, which was the goal of #24407. --- .../index/mapper/ParentFieldMapper.java | 11 +++++----- .../index/mapper/ParentFieldMapperTests.java | 20 +++++++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 73109a3ecd8f9..34eaf569ca949 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -303,15 +303,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; - ParentFieldType currentFieldType = (ParentFieldType) fieldType.clone(); - super.doMerge(mergeWith, updateAllTypes); if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) { throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); } - - if (active()) { - fieldType = currentFieldType; + // If fieldMergeWith is not active it means the user provided a mapping + // update that does not explicitly configure the _parent field, so we + // ignore it. 
+ if (fieldMergeWith.active()) { + super.doMerge(mergeWith, updateAllTypes); } + } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index d0e17b808c596..d21827ee18cea 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; +import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -138,4 +139,23 @@ private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) { return numFieldWithParentPrefix; } + public void testUpdateEagerGlobalOrds() throws IOException { + String parentMapping = XContentFactory.jsonBuilder().startObject().startObject("parent_type") + .endObject().endObject().string(); + String childMapping = XContentFactory.jsonBuilder().startObject().startObject("child_type") + .startObject("_parent").field("type", "parent_type").endObject() + .endObject().endObject().string(); + IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); + indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); + + assertTrue(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); + + String childMappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("child_type") + .startObject("_parent").field("type", "parent_type").field("eager_global_ordinals", false).endObject() + .endObject().endObject().string(); + indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE, false); + + assertFalse(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); + } } From 05e851f0b0fb6e28a8c4e0191ef6f54626d16465 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:34:38 +0100 Subject: [PATCH 19/30] Ignore the `-snapshot` suffix when comparing the Lucene version in the build and the docs. (#27927) Currently if the Lucene version is `X.Y.Z-snapshot-{gitrev}`, then we will expect the docs to have `X.Y.Z-snapshot` as a Lucene version. I would like to change it to `X.Y.Z` so that this doesn't need changing when we move from a snapshot to a final release. 
--- qa/verify-version-constants/build.gradle | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 111c4ccf20e50..1d31db6898b7b 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -76,10 +76,8 @@ task verifyDocsLuceneVersion { throw new GradleException('Could not find lucene version in docs version file') } String expectedLuceneVersion = VersionProperties.lucene - if (expectedLuceneVersion.contains('-snapshot-')) { - expectedLuceneVersion = expectedLuceneVersion.substring(0, expectedLuceneVersion.lastIndexOf('-')) - expectedLuceneVersion = expectedLuceneVersion.toUpperCase(Locale.ROOT) - } + // remove potential -snapshot-{gitrev} suffix + expectedLuceneVersion -= ~/-snapshot-[0-9a-f]+$/ if (docsLuceneVersion != expectedLuceneVersion) { throw new GradleException("Lucene version in docs [${docsLuceneVersion}] does not match version.properties [${expectedLuceneVersion}]") } From a16f80a8321776aea6a105315ffc503014e45732 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:35:27 +0100 Subject: [PATCH 20/30] Fix casts in HotThreads. (#27578) Even though an overflow would be very unlikely, it's better to use the longs directly in the comparator. --- .../elasticsearch/monitor/jvm/HotThreads.java | 36 ++++++++----------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 1714d00abb206..3b6415437f97c 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -35,6 +35,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.ToLongFunction; public class HotThreads { @@ -187,19 +188,19 @@ private String innerDetect() throws Exception { List hotties = new ArrayList<>(threadInfos.values()); final int busiestThreads = Math.min(this.busiestThreads, hotties.size()); // skip that for now - CollectionUtil.introSort(hotties, new Comparator() { - @Override - public int compare(MyThreadInfo o1, MyThreadInfo o2) { - if ("cpu".equals(type)) { - return (int) (o2.cpuTime - o1.cpuTime); - } else if ("wait".equals(type)) { - return (int) (o2.waitedTime - o1.waitedTime); - } else if ("block".equals(type)) { - return (int) (o2.blockedTime - o1.blockedTime); - } - throw new IllegalArgumentException("expected thread type to be either 'cpu', 'wait', or 'block', but was " + type); - } - }); + final ToLongFunction getter; + if ("cpu".equals(type)) { + getter = o -> o.cpuTime; + } else if ("wait".equals(type)) { + getter = o -> o.waitedTime; + } else if ("block".equals(type)) { + getter = o -> o.blockedTime; + } else { + throw new IllegalArgumentException("expected thread type to be either 'cpu', 'wait', or 'block', but was " + type); + } + + CollectionUtil.introSort(hotties, Comparator.comparingLong(getter).reversed()); + // analyse N stack traces for M busiest threads long[] ids = new long[busiestThreads]; for (int i = 0; i < busiestThreads; i++) { @@ -215,14 +216,7 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) { Thread.sleep(threadElementsSnapshotDelay.millis()); } for (int t = 0; t < busiestThreads; t++) { - long time = 0; - if ("cpu".equals(type)) { - time = hotties.get(t).cpuTime; - } else if ("wait".equals(type)) { - time = 
hotties.get(t).waitedTime; - } else if ("block".equals(type)) { - time = hotties.get(t).blockedTime; - } + long time = getter.applyAsLong(hotties.get(t)); String threadName = null; for (ThreadInfo[] info : allInfos) { if (info != null && info[t] != null) { From 0a92e43f6252911a0a0fab44af6e0075f57d7bad Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:36:32 +0100 Subject: [PATCH 21/30] Avoid doing redundant work when checking for self references. (#26927) Currently we test all maps, arrays or iterables. However, in the case that maps contain sub maps for instance, we will test the sub maps again even though the work has already been done for the top-level map. Relates #26907 --- .../common/xcontent/XContentBuilder.java | 49 ++++++++++--------- .../common/xcontent/BaseXContentTestCase.java | 1 - 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index f0427ce246669..070510e13ff69 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -773,32 +773,23 @@ public XContentBuilder field(String name, Object value) throws IOException { } public XContentBuilder array(String name, Object... values) throws IOException { - return field(name).values(values); + return field(name).values(values, true); } - XContentBuilder values(Object[] values) throws IOException { + private XContentBuilder values(Object[] values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } - // checks that the array of object does not contain references to itself because - // iterating over entries will cause a stackoverflow error - ensureNoSelfReferences(values); - - startArray(); - for (Object o : values) { - value(o); - } - endArray(); - return this; + return value(Arrays.asList(values), ensureNoSelfReferences); } public XContentBuilder value(Object value) throws IOException { - unknownValue(value); + unknownValue(value, true); return this; } - private void unknownValue(Object value) throws IOException { + private void unknownValue(Object value, boolean ensureNoSelfReferences) throws IOException { if (value == null) { nullValue(); return; @@ -810,11 +801,11 @@ private void unknownValue(Object value) throws IOException { //Path implements Iterable and causes endless recursion and a StackOverFlow if treated as an Iterable here value((Path) value); } else if (value instanceof Map) { - map((Map) value); + map((Map) value, ensureNoSelfReferences); } else if (value instanceof Iterable) { - value((Iterable) value); + value((Iterable) value, ensureNoSelfReferences); } else if (value instanceof Object[]) { - values((Object[]) value); + values((Object[]) value, ensureNoSelfReferences); } else if (value instanceof Calendar) { value((Calendar) value); } else if (value instanceof ReadableInstant) { @@ -863,18 +854,25 @@ public XContentBuilder field(String name, Map values) throws IOE } public XContentBuilder map(Map values) throws IOException { + return map(values, true); + } + + private XContentBuilder map(Map values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error - ensureNoSelfReferences(values); + if 
(ensureNoSelfReferences) { + ensureNoSelfReferences(values); + } startObject(); for (Map.Entry value : values.entrySet()) { field(value.getKey()); - unknownValue(value.getValue()); + // pass ensureNoSelfReferences=false as we already performed the check at a higher level + unknownValue(value.getValue(), false); } endObject(); return this; } @@ -884,7 +882,7 @@ public XContentBuilder field(String name, Iterable values) throws IOException return field(name).value(values); } - private XContentBuilder value(Iterable values) throws IOException { + private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } @@ -895,11 +893,14 @@ private XContentBuilder value(Iterable values) throws IOException { } else { // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error - ensureNoSelfReferences(values); + if (ensureNoSelfReferences) { + ensureNoSelfReferences(values); + } startArray(); for (Object value : values) { - unknownValue(value); + // pass ensureNoSelfReferences=false as we already performed the check at a higher level + unknownValue(value, false); } endArray(); } @@ -1076,9 +1077,9 @@ private static void ensureNoSelfReferences(final Object value, final Set Iterable it; if (value instanceof Map) { - it = ((Map) value).values(); + it = ((Map) value).values(); } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - it = (Iterable) value; + it = (Iterable) value; } else if (value instanceof Object[]) { it = Arrays.asList((Object[]) value); } else { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e468751cf4aba..e368163a4e95c 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -534,7 +534,6 @@ public void testObjects() throws Exception { final String expected = o.getKey(); assertResult(expected, () -> builder().startObject().field("objects", o.getValue()).endObject()); assertResult(expected, () -> builder().startObject().field("objects").value(o.getValue()).endObject()); - assertResult(expected, () -> builder().startObject().field("objects").values(o.getValue()).endObject()); assertResult(expected, () -> builder().startObject().array("objects", o.getValue()).endObject()); } } From b82017cbfeb8eff0af7bf2c6f8e93db91d172e73 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 19:35:54 +0100 Subject: [PATCH 22/30] Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter (#28225) This commit changes the phonetic filter factory to use a DaitchMokotoffSoundexFilter instead of a PhoneticFilter with a daitch_mokotoff encoder when daitch_mokotoff is selected. The latter does not handle branching when computing the soundex and fails to encode multiple variations when possible.
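To make the branching behavior concrete, here is a minimal sketch of the dedicated filter (assuming the lucene-analyzers-phonetic module on the classpath; the input and expected codes mirror the test added below):

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class DaitchMokotoffDemo {
    public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("chauptman"));
        // inject=false replaces the original token with its soundex codes,
        // which corresponds to the factory default of replace=true.
        TokenStream stream = new DaitchMokotoffSoundexFilter(tokenizer, false);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            // "chauptman" branches into two codes, 473660 and 573660
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}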
Closes #28211 --- .../index/analysis/PhoneticTokenFilterFactory.java | 9 ++++++++- .../index/analysis/SimplePhoneticAnalysisTests.java | 11 +++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index d02ac2ae2be70..b63ad561a5add 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -33,6 +33,7 @@ import org.apache.commons.codec.language.bm.RuleType; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.BeiderMorseFilter; +import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.phonetic.PhoneticFilter; import org.elasticsearch.common.settings.Settings; @@ -53,6 +54,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private List languageset; private NameType nametype; private RuleType ruletype; + private boolean isDaitchMokotoff; public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); @@ -60,6 +62,7 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir this.nametype = null; this.ruletype = null; this.maxcodelength = 0; + this.isDaitchMokotoff = false; this.replace = settings.getAsBoolean("replace", true); // weird, encoder is null at last step in SimplePhoneticAnalysisTests, so we set it to metaphone as default String encodername = settings.get("encoder", "metaphone"); @@ -106,7 +109,8 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir } else if ("nysiis".equalsIgnoreCase(encodername)) { this.encoder = new Nysiis(); } else if ("daitch_mokotoff".equalsIgnoreCase(encodername)) { - this.encoder = new DaitchMokotoffSoundex(); + this.encoder = null; + this.isDaitchMokotoff = true; } else { throw new IllegalArgumentException("unknown encoder [" + encodername + "] for phonetic token filter"); } @@ -115,6 +119,9 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir @Override public TokenStream create(TokenStream tokenStream) { if (encoder == null) { + if (isDaitchMokotoff) { + return new DaitchMokotoffSoundexFilter(tokenStream, !replace); + } if (ruletype != null && nametype != null) { LanguageSet langset = null; if (languageset != null && languageset.size() > 0) { diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java index e3877faee3146..7fad525b33c3e 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; import 
org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -72,4 +73,14 @@ public void testPhoneticTokenFilterBeiderMorseWithLanguage() throws IOException "rmba", "rmbalt", "rmbo", "rmbolt", "rmbu", "rmbult" }; BaseTokenStreamTestCase.assertTokenStreamContents(filterFactory.create(tokenizer), expected); } + + public void testPhoneticTokenFilterDaitchMotokoff() throws IOException { + TokenFilterFactory filterFactory = analysis.tokenFilter.get("daitch_mokotoff"); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader("chauptman")); + String[] expected = new String[] { "473660", "573660" }; + assertThat(filterFactory.create(tokenizer), instanceOf(DaitchMokotoffSoundexFilter.class)); + BaseTokenStreamTestCase.assertTokenStreamContents(filterFactory.create(tokenizer), expected); + } + } From 18463e7e9f5243648eae1f4ee99a15c4c1b52b36 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 15 Jan 2018 11:28:31 -0800 Subject: [PATCH 23/30] Painless: Add whitelist extensions (#28161) This commit adds a PainlessExtension which may be plugged in via SPI to add additional classes, methods and members to the painless whitelist on a per context basis. An example plugin adding and using a whitelist is also added. --- .../org/elasticsearch/painless/Compiler.java | 1 + .../elasticsearch/painless/Definition.java | 24 +------- .../painless/PainlessPlugin.java | 30 +++++++++- .../painless/PainlessScriptEngine.java | 19 +++---- .../painless/spi/PainlessExtension.java | 30 ++++++++++ .../painless/{ => spi}/Whitelist.java | 22 ++++++- .../painless/{ => spi}/WhitelistLoader.java | 7 ++- .../plugin-metadata/plugin-security.policy | 3 + .../painless/{ => spi}/java.lang.txt | 0 .../painless/{ => spi}/java.math.txt | 0 .../painless/{ => spi}/java.text.txt | 0 .../painless/{ => spi}/java.time.chrono.txt | 0 .../painless/{ => spi}/java.time.format.txt | 0 .../painless/{ => spi}/java.time.temporal.txt | 0 .../painless/{ => spi}/java.time.txt | 0 .../painless/{ => spi}/java.time.zone.txt | 0 .../painless/{ => spi}/java.util.function.txt | 0 .../painless/{ => spi}/java.util.regex.txt | 0 .../painless/{ => spi}/java.util.stream.txt | 0 .../painless/{ => spi}/java.util.txt | 0 .../painless/{ => spi}/joda.time.txt | 0 .../painless/{ => spi}/org.elasticsearch.txt | 0 .../painless/AnalyzerCasterTests.java | 8 +-- .../painless/BaseClassTests.java | 8 +-- .../elasticsearch/painless/DebugTests.java | 5 +- .../org/elasticsearch/painless/Debugger.java | 5 +- .../painless/DefBootstrapTests.java | 4 +- .../elasticsearch/painless/FactoryTests.java | 16 +++--- .../painless/NeedsScoreTests.java | 13 +++-- .../painless/PainlessDocGenerator.java | 6 +- .../painless/ScriptTestCase.java | 17 +++--- .../painless/SimilarityScriptTests.java | 13 +++-- .../painless/node/NodeToStringTests.java | 6 +- .../examples/painless-whitelist/build.gradle | 4 ++ .../ExampleWhitelistExtension.java | 42 ++++++++++++++ .../ExampleWhitelistedClass.java | 57 +++++++++++++++++++ .../painlesswhitelist/MyWhitelistPlugin.java | 1 + ...asticsearch.painless.spi.PainlessExtension | 1 + .../painlesswhitelist/example_whitelist.txt | 42 ++++++++++++++ .../test/painless_whitelist/20_whitelist.yml | 26 +++++++++ 40 files changed, 319 insertions(+), 91 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java rename modules/lang-painless/src/main/java/org/elasticsearch/painless/{ => spi}/Whitelist.java 
(93%) rename modules/lang-painless/src/main/java/org/elasticsearch/painless/{ => spi}/WhitelistLoader.java (98%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.lang.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.math.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.text.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.chrono.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.format.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.temporal.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.zone.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.function.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.regex.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.stream.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/joda.time.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/org.elasticsearch.txt (100%) create mode 100644 plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java create mode 100644 plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java create mode 100644 plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension create mode 100644 plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt create mode 100644 plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index ad5e80ba16edd..8102016828c30 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -22,6 +22,7 @@ import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.node.SSource; +import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Printer; import java.lang.reflect.Constructor; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 7d8b4ff4e614e..7729c5319ea81 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.painless.spi.Whitelist; import 
java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -46,29 +47,6 @@ public final class Definition { private static final Pattern TYPE_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); - public static final String[] DEFINITION_FILES = new String[] { - "org.elasticsearch.txt", - "java.lang.txt", - "java.math.txt", - "java.text.txt", - "java.time.txt", - "java.time.chrono.txt", - "java.time.format.txt", - "java.time.temporal.txt", - "java.time.zone.txt", - "java.util.txt", - "java.util.function.txt", - "java.util.regex.txt", - "java.util.stream.txt", - "joda.time.txt" - }; - - /** - * Whitelist that is "built in" to Painless and required by all scripts. - */ - public static final Definition DEFINITION = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); - /** Some native types as constants: */ public final Type voidType; public final Type booleanType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 842af8717a34b..795d81bb6e058 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -22,28 +22,56 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; /** * Registers Painless as a plugin. 
*/ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin { + private final Map, List> extendedWhitelists = new HashMap<>(); + @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new PainlessScriptEngine(settings, contexts); + Map, List> contextsWithWhitelists = new HashMap<>(); + for (ScriptContext context : contexts) { + // we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI + List whitelists = extendedWhitelists.get(context); + if (whitelists == null) { + whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + } + contextsWithWhitelists.put(context, whitelists); + } + return new PainlessScriptEngine(settings, contextsWithWhitelists); } @Override public List> getSettings() { return Arrays.asList(CompilerSettings.REGEX_ENABLED); } + + @Override + public void reloadSPI(ClassLoader loader) { + for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) { + for (Map.Entry, List> entry : extension.getContextWhitelists().entrySet()) { + List existing = extendedWhitelists.computeIfAbsent(entry.getKey(), + c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); + existing.addAll(entry.getValue()); + } + } + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index ac01f45a7fdd6..95a38bf22c653 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -19,12 +19,12 @@ package org.elasticsearch.painless; -import org.apache.logging.log4j.core.tools.Generate; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.Compiler.Loader; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; @@ -45,7 +45,6 @@ import java.security.ProtectionDomain; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -82,7 +81,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr /** * Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside - * of {@link PainlessScriptEngine#PainlessScriptEngine(Settings, Collection)}. + * of {@link PainlessScriptEngine#PainlessScriptEngine(Settings, Map)}. */ private final CompilerSettings defaultCompilerSettings = new CompilerSettings(); @@ -92,23 +91,19 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr * Constructor. * @param settings The settings to initialize the engine with. */ - public PainlessScriptEngine(Settings settings, Collection> contexts) { + public PainlessScriptEngine(Settings settings, Map, List> contexts) { super(settings); defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings)); Map, Compiler> contextsToCompilers = new HashMap<>(); - // Placeholder definition used for all contexts until SPI is fully integrated. 
Reduces memory foot print - // by re-using the same definition since caching isn't implemented at this time. - Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); - - for (ScriptContext context : contexts) { + for (Map.Entry, List> entry : contexts.entrySet()) { + ScriptContext context = entry.getKey(); if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) { - contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, definition)); + contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, new Definition(entry.getValue()))); } else { - contextsToCompilers.put(context, new Compiler(context.instanceClazz, definition)); + contextsToCompilers.put(context, new Compiler(context.instanceClazz, new Definition(entry.getValue()))); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java new file mode 100644 index 0000000000000..9434e6986c0a3 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Map; + +import org.elasticsearch.script.ScriptContext; + +public interface PainlessExtension { + + Map, List> getContextWhitelists(); +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java similarity index 93% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 678b8a4c1ae38..e715eb0090c7f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import java.util.Collections; import java.util.List; @@ -34,6 +34,26 @@ */ public final class Whitelist { + private static final String[] BASE_WHITELIST_FILES = new String[] { + "org.elasticsearch.txt", + "java.lang.txt", + "java.math.txt", + "java.text.txt", + "java.time.txt", + "java.time.chrono.txt", + "java.time.format.txt", + "java.time.temporal.txt", + "java.time.zone.txt", + "java.util.txt", + "java.util.function.txt", + "java.util.regex.txt", + "java.util.stream.txt", + "joda.time.txt" + }; + + public static final List BASE_WHITELISTS = + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Whitelist.class, BASE_WHITELIST_FILES)); + /** * Struct represents the equivalent of a Java class in Painless complete with super classes, * constructors, methods, and fields. In Painless a class is known as a struct primarily to avoid diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java similarity index 98% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 93ea951f453aa..8817bfa274c60 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import java.io.InputStreamReader; import java.io.LineNumberReader; @@ -25,6 +25,8 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -296,8 +298,9 @@ public static Whitelist loadFromResourceFiles(Class resource, String... 
filep throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction)resource::getClassLoader); - return new Whitelist(resource.getClassLoader(), whitelistStructs); + return new Whitelist(loader, whitelistStructs); } private WhitelistLoader() {} diff --git a/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy b/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy index e45c1b86ceb2c..b383c6da3f12c 100644 --- a/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy +++ b/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy @@ -20,4 +20,7 @@ grant { // needed to generate runtime classes permission java.lang.RuntimePermission "createClassLoader"; + + // needed to find the classloader to load whitelisted classes from + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt 
b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index 58ae31a45c93a..919b0881c0794 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -21,16 +21,12 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; -import java.util.Collections; - -import static org.elasticsearch.painless.Definition.DEFINITION_FILES; - public class AnalyzerCasterTests extends ESTestCase { - private static final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); + private static final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); private static void assertCast(Type actual, Type expected, boolean mustBeExplicit) { Location location = new Location("dummy", 0); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index 2ba8692b8af59..59cafa96ddcb9 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -19,13 +19,12 @@ package org.elasticsearch.painless; -import org.elasticsearch.script.ScriptContext; - -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.elasticsearch.painless.spi.Whitelist; + import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; @@ -37,8 +36,7 @@ */ public class BaseClassTests extends ScriptTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public abstract static class Gets { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index a55b48f0189b3..279438e74a7c3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -22,10 +22,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptException; import java.io.IOException; -import java.util.Collections; import java.util.Map; import static java.util.Collections.singletonList; @@ -35,8 +35,7 @@ import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public void testExplain() { // Debug.explain can explain an object diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 52ec783db4ef4..e29986a3c87de 100644 --- 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -20,11 +20,11 @@ package org.elasticsearch.painless; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Textifier; import java.io.PrintWriter; import java.io.StringWriter; -import java.util.Collections; /** quick and dirty tools for debugging */ final class Debugger { @@ -40,8 +40,7 @@ static String toString(Class iface, String source, CompilerSettings settings) PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES)))) + new Compiler(iface, new Definition(Whitelist.BASE_WHITELISTS)) .compile("", source, settings, textifier); } catch (Exception e) { textifier.print(outputWriter); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index dccc9c0aeb505..8fd96d67d5b53 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -27,11 +27,11 @@ import java.util.Collections; import java.util.HashMap; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; public class DefBootstrapTests extends ESTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); /** calls toString() on integers, twice */ public void testOneType() throws Throwable { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java index b15a2747bd088..556ef8dd3c6d3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java @@ -19,21 +19,23 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.TemplateScript; -import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; public class FactoryTests extends ScriptTestCase { - protected Collection> scriptContexts() { - Collection> contexts = super.scriptContexts(); - contexts.add(StatefulFactoryTestScript.CONTEXT); - contexts.add(FactoryTestScript.CONTEXT); - contexts.add(EmptyTestScript.CONTEXT); - contexts.add(TemplateScript.CONTEXT); + @Override + protected Map, List> scriptContexts() { + Map, List> contexts = super.scriptContexts(); + contexts.put(StatefulFactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(FactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(EmptyTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(TemplateScript.CONTEXT, Whitelist.BASE_WHITELISTS); return contexts; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java index db254b734a81a..50a377b881878 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java @@ -22,14 +22,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * Test that needsScores() is reported correctly depending on whether _score is used @@ -40,8 +43,10 @@ public class NeedsScoreTests extends ESSingleNodeTestCase { public void testNeedsScores() { IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); - PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, - Arrays.asList(SearchScript.CONTEXT, ExecutableScript.CONTEXT)); + Map, List> contexts = new HashMap<>(); + contexts.put(SearchScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ExecutableScript.CONTEXT, Whitelist.BASE_WHITELISTS); + PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts); QueryShardContext shardContext = index.newQueryShardContext(0, null, () -> 0, null); SearchLookup lookup = new SearchLookup(index.mapperService(), shardContext::getForField, null); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index edd600c5664f2..87b1677102635 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -27,7 +27,7 @@ import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Struct; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.api.Augmentation; +import org.elasticsearch.painless.spi.Whitelist; import java.io.IOException; import java.io.PrintStream; @@ -36,7 +36,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -68,8 +67,7 @@ public static void main(String[] args) throws IOException { Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8.name())) { emitGeneratedWarning(indexStream); - List types = new Definition(Collections.singletonList( - WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))). + List types = new Definition(Whitelist.BASE_WHITELISTS). 
allSimpleTypes().stream().sorted(comparing(t -> t.name)).collect(toList()); for (Type type : types) { if (type.clazz.isPrimitive()) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 730dd298f8a54..ea1d2275b3e8d 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.antlr.Walker; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptException; @@ -31,10 +32,8 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.painless.node.SSource.MainMethodReserved; @@ -63,11 +62,10 @@ protected Settings scriptEngineSettings() { /** * Script contexts used to build the script engine. Override to customize which script contexts are available. */ - protected Collection> scriptContexts() { - Collection> contexts = new ArrayList<>(); - contexts.add(SearchScript.CONTEXT); - contexts.add(ExecutableScript.CONTEXT); - + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + contexts.put(SearchScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ExecutableScript.CONTEXT, Whitelist.BASE_WHITELISTS); return contexts; } @@ -92,8 +90,7 @@ public Object exec(String script, Map vars, boolean picky) { public Object exec(String script, Map vars, Map compileParams, Scorer scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + Definition definition = new Definition(Whitelist.BASE_WHITELISTS); ScriptClassInfo scriptClassInfo = new ScriptClassInfo(definition, GenericElasticsearchScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index d8f43fb066867..0795ab7777526 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -37,20 +37,25 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.index.similarity.ScriptedSimilarity; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SimilarityScript; import org.elasticsearch.script.SimilarityWeightScript; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; public class SimilarityScriptTests extends ScriptTestCase { @Override - 
protected Collection> scriptContexts() { - return Arrays.asList(SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT); + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + contexts.put(SimilarityScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(SimilarityWeightScript.CONTEXT, Whitelist.BASE_WHITELISTS); + return contexts; } public void testBasics() throws IOException { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 9e3477b1cfe02..424b0c286ecff 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -33,12 +33,11 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ScriptClassInfo; -import org.elasticsearch.painless.WhitelistLoader; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -50,8 +49,7 @@ * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. */ public class NodeToStringTests extends ESTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public void testEAssignment() { assertToString( diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 2213aea16f6cd..12bbff8b0419e 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -26,6 +26,10 @@ esplugin { extendedPlugins = ['lang-painless'] } +dependencies { + compileOnly project(':modules:lang-painless') +} + integTestCluster { distribution = 'zip' } diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java new file mode 100644 index 0000000000000..9e3bc66e7d58d --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.example.painlesswhitelist; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.SearchScript; + +/** An extension of painless which adds a whitelist. */ +public class ExampleWhitelistExtension implements PainlessExtension { + + private static final Whitelist WHITELIST = + WhitelistLoader.loadFromResourceFiles(ExampleWhitelistExtension.class, "example_whitelist.txt"); + + @Override + public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() { + return Collections.singletonMap(SearchScript.CONTEXT, Collections.singletonList(WHITELIST)); + } +} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java new file mode 100644 index 0000000000000..14f15b383d0c8 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.example.painlesswhitelist; + +/** + * An example of a class to be whitelisted for use by painless scripts + * + * Each of the members and methods below is whitelisted for use in search scripts. + * See example_whitelist.txt.
+ */ +public class ExampleWhitelistedClass { + + public static final int CONSTANT = 42; + + public int publicMember; + + private int privateMember; + + public ExampleWhitelistedClass(int publicMember, int privateMember) { + this.publicMember = publicMember; + this.privateMember = privateMember; + } + + public int getPrivateMemberAccessor() { + return this.privateMember; + } + + public void setPrivateMemberAccessor(int privateMember) { + this.privateMember = privateMember; + } + + public static void staticMethod() { + // electricity + } + + // example augmentation method + public static int toInt(String x) { + return Integer.parseInt(x); + } +} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java index 877a05391ac77..a4ef5f6f000e1 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java @@ -22,4 +22,5 @@ import org.elasticsearch.plugins.Plugin; public class MyWhitelistPlugin extends Plugin { + // we don't actually need anything here, since whitelists are extended through SPI } diff --git a/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension b/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension new file mode 100644 index 0000000000000..9babd702c8083 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension @@ -0,0 +1 @@ +org.elasticsearch.example.painlesswhitelist.ExampleWhitelistExtension \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt b/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt new file mode 100644 index 0000000000000..7908d35417511 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt @@ -0,0 +1,42 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# This file contains a whitelist for an example class which may be accessed from painless + +class org.elasticsearch.example.painlesswhitelist.ExampleWhitelistedClass { + # constructor + (int, int) + + # static constants and methods look the same as instance members and methods + int CONSTANT + void staticMethod() + + # members lack the parentheses that methods have + int publicMember + + # getter and setter for private member + int getPrivateMemberAccessor() + void setPrivateMemberAccessor(int) +} + +class java.lang.String { + # existing classes can be "augmented" to have additional methods, which take the object + # to operate on as the first argument to a static method + int org.elasticsearch.example.painlesswhitelist.ExampleWhitelistedClass toInt() +} \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml new file mode 100644 index 0000000000000..bbb0b44ef1d45 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml @@ -0,0 +1,26 @@ +# Example test using whitelisted members and methods + +"Whitelisted custom class": + - do: + index: + index: test + type: test + id: 1 + body: { "num1": 1.0 } + - do: + indices.refresh: {} + + - do: + index: test + search: + body: + query: + match_all: {} + script_fields: + sNum1: + script: + source: "def e = new ExampleWhitelistedClass(6, 42); ExampleWhitelistedClass.staticMethod(); return e.publicMember + e.privateMemberAccessor + ExampleWhitelistedClass.CONSTANT + '2'.toInt()" + lang: painless + + - match: { hits.total: 1 } + - match: { hits.hits.0.fields.sNum1.0: 92 } From 5ed25f1e12b4b3e249da107745a99941773b389d Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Wed, 6 Dec 2017 11:58:20 -0600 Subject: [PATCH 24/30] [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder Add WKT BBOX parsing support to GeoBoundingBoxQueryBuilder.
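For readers new to the WKT `BBOX` syntax this patch accepts, a quick sketch of how its coordinate order maps onto the query's corners; this is an illustration only (not part of the patch) and assumes the standard `QueryBuilders` factory from `org.elasticsearch.index.query`:

[source,java]
--------------------------------------------------
// WKT BBOX lists longitudes first, then latitudes: BBOX (minLon, maxLon, maxLat, minLat).
// So "BBOX (-74.1, -71.12, 40.73, 40.01)" describes the box with
// top_left = (lat 40.73, lon -74.1) and bottom_right = (lat 40.01, lon -71.12).
String[] c = "-74.1, -71.12, 40.73, 40.01".split(",");
double minLon = Double.parseDouble(c[0].trim()); // west
double maxLon = Double.parseDouble(c[1].trim()); // east
double maxLat = Double.parseDouble(c[2].trim()); // north
double minLat = Double.parseDouble(c[3].trim()); // south
GeoBoundingBoxQueryBuilder query = QueryBuilders.geoBoundingBoxQuery("pin.location")
    .setCorners(maxLat, minLon, minLat, maxLon); // top, left, bottom, right
--------------------------------------------------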
--- .../query-dsl/geo-bounding-box-query.asciidoc | 25 ++++ .../common/geo/parsers/GeoWKTParser.java | 21 ++- .../query/GeoBoundingBoxQueryBuilder.java | 135 +++++++++++------- .../common/geo/GeoWKTShapeParserTests.java | 12 ++ .../GeoBoundingBoxQueryBuilderTests.java | 44 ++++++ 5 files changed, 181 insertions(+), 56 deletions(-) diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index e8db949bbc6b8..a1b427acf2718 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -180,6 +180,31 @@ GET /_search -------------------------------------------------- // CONSOLE +[float] +===== Bounding Box as Well-Known Text (WKT) + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "wkt" : "BBOX (-74.1, -71.12, 40.73, 40.01)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + [float] ===== Geohash diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 005caed53a7e9..38643df017943 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -63,6 +63,12 @@ private GeoWKTParser() {} public static ShapeBuilder parse(XContentParser parser) throws IOException, ElasticsearchParseException { + return parseExpectedType(parser, null); + } + + /** throws an exception if the parsed geometry type does not match the expected shape type */ + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) + throws IOException, ElasticsearchParseException { FastStringReader reader = new FastStringReader(parser.text()); try { // setup the tokenizer; configured to read words w/o numbers @@ -77,7 +83,7 @@ public static ShapeBuilder parse(XContentParser parser) tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType); checkEOF(tokenizer); return builder; } finally { @@ -86,8 +92,14 @@ public static ShapeBuilder parse(XContentParser parser) } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType) + throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); + if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { + if (type.wktName().equals(shapeType.wktName()) == false) { + throw new ElasticsearchParseException("Expected geometry type [{}] but found [{}]", shapeType, type); + } + } switch (type) { case POINT: return parsePoint(stream); @@ -228,9 +240,10 @@ private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape(parseGeometry(stream)); + GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( + parseGeometry(stream, 
GeoShapeType.GEOMETRYCOLLECTION)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream)); + builder.shape(parseGeometry(stream, null)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index c0e57cc45afd9..47dcbaa351454 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -31,7 +31,10 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.geo.builders.EnvelopeBuilder; +import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -62,7 +65,6 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder GeoWKTParser.parseExpectedType(parser, GeoShapeType.POLYGON)); + assertThat(e, hasToString(containsString("Expected geometry type [polygon] but found [point]"))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 133057fb8d026..aeaca328ceb7b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -406,6 +406,50 @@ public void testFromJson() throws IOException { assertEquals(json, GeoExecType.MEMORY, parsed.type()); } + public void testFromWKT() throws IOException { + String wkt = + "{\n" + + " \"geo_bounding_box\" : {\n" + + " \"pin.location\" : {\n" + + " \"wkt\" : \"BBOX (-74.1, -71.12, 40.73, 40.01)\"\n" + + " },\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"type\" : \"MEMORY\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + // toXContent generates the query in geojson only; for now we need to test against the expected + // geojson generated content + String expectedJson = + "{\n" + + " \"geo_bounding_box\" : {\n" + + " \"pin.location\" : {\n" + + " \"top_left\" : [ -74.1, 40.73 ],\n" + + " \"bottom_right\" : [ -71.12, 40.01 ]\n" + + " },\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"type\" : \"MEMORY\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + // parse with wkt + GeoBoundingBoxQueryBuilder parsed = (GeoBoundingBoxQueryBuilder) parseQuery(wkt); + // check the builder's generated geojson content against the expected json output + checkGeneratedJson(expectedJson, parsed); + double delta = 0d; + assertEquals(expectedJson, "pin.location", parsed.fieldName()); + assertEquals(expectedJson, -74.1, parsed.topLeft().getLon(), delta); + assertEquals(expectedJson, 40.73, parsed.topLeft().getLat(), delta); + assertEquals(expectedJson, -71.12, parsed.bottomRight().getLon(), delta); + assertEquals(expectedJson, 40.01, parsed.bottomRight().getLat(), delta); + assertEquals(expectedJson, 1.0, parsed.boost(), delta); + assertEquals(expectedJson, GeoExecType.MEMORY, parsed.type()); + } + @Override 
public void testMustRewrite() throws IOException { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); From 6c297ad7c8cc85a0d30b7a38f1eecd835a650c70 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 15 Jan 2018 18:13:47 -0500 Subject: [PATCH 25/30] TEST: Update logging for testAckedIndexing - Log the response of indexing requests - Correct logging setting for discovery package --- .../org/elasticsearch/discovery/ClusterDisruptionIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 8d21c6306382b..55f5b70e70299 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -81,7 +81,8 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { *

* This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates */ - @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE," + + @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") public void testAckedIndexing() throws Exception { @@ -137,7 +138,7 @@ public void testAckedIndexing() throws Exception { .get(timeout); assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); ackedDocs.put(id, node); - logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node); + logger.trace("[{}] indexed id [{}] through node [{}], response [{}]", name, id, node, response); } catch (ElasticsearchException e) { exceptedExceptions.add(e); final String docId = id; From 71ba314c733fe5f2a175e2b8e8d871d61e3e3202 Mon Sep 17 00:00:00 2001 From: fbsolo Date: Tue, 16 Jan 2018 00:35:35 -0800 Subject: [PATCH 26/30] [Docs] Changes to ingest.asciidoc (#28212) --- docs/reference/ingest.asciidoc | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index da1164930bc1e..18349beab6ab1 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -3,26 +3,27 @@ [partintro] -- -You can use ingest node to pre-process documents before the actual indexing takes place. -This pre-processing happens by an ingest node that intercepts bulk and index requests, applies the -transformations, and then passes the documents back to the index or bulk APIs. +Use an ingest node to pre-process documents before the actual document indexing happens. +The ingest node intercepts bulk and index requests, applies the transformations, and then +passes the documents back to the index or bulk APIs. -You can enable ingest on any node or even have dedicated ingest nodes. Ingest is enabled by default -on all nodes. To disable ingest on a node, configure the following setting in the `elasticsearch.yml` file: +All nodes enable ingest by default, so any node can handle ingest tasks. You can also create +dedicated ingest nodes. To disable ingest for a node, configure the following setting in the +elasticsearch.yml file: [source,yaml] -------------------------------------------------- node.ingest: false -------------------------------------------------- -To pre-process documents before indexing, you <<pipeline,define a pipeline>> that specifies -a series of <<ingest-processors,processors>>. Each processor transforms the document in some way. -For example, you may have a pipeline that consists of one processor that removes a field from -the document followed by another processor that renames a field. Configured pipelines are then stored -in the <<cluster-state,cluster state>>. +To pre-process documents before indexing, <<pipeline,define a pipeline>> that specifies a series of +<<ingest-processors,processors>>. Each processor transforms the document in some specific way. For example, a +pipeline might have one processor that removes a field from the document, followed by +another processor that renames a field. The <<cluster-state,cluster state>> then stores +the configured pipelines. -To use a pipeline, you simply specify the `pipeline` parameter on an index or bulk request to -tell the ingest node which pipeline to use.
For example: +To use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This +way, the ingest node knows which pipeline to use. For example: [source,js] -------------------------------------------------- From 0c4e2cbc19a9dad671b135d8f473943119677409 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 16 Jan 2018 09:50:06 +0100 Subject: [PATCH 27/30] Fallback to TransportMasterNodeAction for cluster health retries (#28195) ClusterHealthAction does not use the regular retry logic, possibly causing StackOverflowErrors. Relates #28169 --- .../admin/cluster/health/TransportClusterHealthAction.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index f4c7748d43924..541738d6be7cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -125,7 +126,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onNoLongerMaster(String source) { logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); - doExecute(task, request, listener); + // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException + listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); } @Override From 196c7b80dc2e8bebd9d9023be13639a2078f3d15 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 16 Jan 2018 09:58:58 +0100 Subject: [PATCH 28/30] Never return null from Strings.tokenizeToStringArray (#28224) This method has a different contract than all the other methods in this class: it returns null instead of an empty array when it receives a null input. Switching some methods over from delimitedListToStringArray to tokenizeToStringArray therefore resulted in unexpected nulls in some places of our code.
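To make the contract change concrete, a small sketch of the behaviour before and after this patch (illustration only, not part of the patch):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.Strings;

// Before: the two sibling methods disagreed on null input:
//   Strings.delimitedListToStringArray(null, ",") -> empty array
//   Strings.tokenizeToStringArray(null, ",")      -> null, surfacing as an NPE later
// After: both return an empty array for null input.
String[] tokens = Strings.tokenizeToStringArray(null, ",");
assert tokens != null && tokens.length == 0;
--------------------------------------------------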
Relates #28213 --- .../src/main/java/org/elasticsearch/common/Strings.java | 5 ++++- .../allocation/decider/FilterAllocationDeciderTests.java | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java index 6c2fc4e1ec153..02a0852b0a03a 100644 --- a/server/src/main/java/org/elasticsearch/common/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/Strings.java @@ -474,6 +474,9 @@ public static String[] split(String toSplit, String delimiter) { * @see #delimitedListToStringArray */ public static String[] tokenizeToStringArray(final String s, final String delimiters) { + if (s == null) { + return EMPTY_ARRAY; + } return toStringArray(tokenizeToCollection(s, delimiters, ArrayList::new)); } @@ -536,7 +539,7 @@ public static String[] delimitedListToStringArray(String str, String delimiter) */ public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) { if (str == null) { - return new String[0]; + return EMPTY_ARRAY; } if (delimiter == null) { return new String[]{str}; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index c4105771229bc..8381f2f960b75 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -194,6 +194,14 @@ public void testInvalidIPFilter() { assertEquals("invalid IP address [" + invalidIP + "] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } + public void testNull() { + Setting filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, + IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + + IndexMetaData.builder("test") + .settings(settings(Version.CURRENT).putNull(filterSetting.getKey() + "name")).numberOfShards(2).numberOfReplicas(0).build(); + } + public void testWildcardIPFilter() { String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip"); Setting filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, From efe2e521180f989218898867c5509d860fc46312 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 16 Jan 2018 10:50:07 +0100 Subject: [PATCH 29/30] Fix eclipse build. 
(#28236) Relates #28191 --- libs/elasticsearch-core/src/main/eclipse-build.gradle | 2 ++ libs/elasticsearch-core/src/test/eclipse-build.gradle | 6 ++++++ settings.gradle | 5 +++++ 3 files changed, 13 insertions(+) create mode 100644 libs/elasticsearch-core/src/main/eclipse-build.gradle create mode 100644 libs/elasticsearch-core/src/test/eclipse-build.gradle diff --git a/libs/elasticsearch-core/src/main/eclipse-build.gradle b/libs/elasticsearch-core/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..9c84a4d6bd84b --- /dev/null +++ b/libs/elasticsearch-core/src/main/eclipse-build.gradle @@ -0,0 +1,2 @@ +// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' diff --git a/libs/elasticsearch-core/src/test/eclipse-build.gradle b/libs/elasticsearch-core/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..f43f019941bb2 --- /dev/null +++ b/libs/elasticsearch-core/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ +// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:elasticsearch-core') +} diff --git a/settings.gradle b/settings.gradle index b844af52df76b..46ecb3dad1c97 100644 --- a/settings.gradle +++ b/settings.gradle @@ -110,6 +110,7 @@ if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects // for server-src and server-tests projects << 'server-tests' + projects << 'libs:elasticsearch-core-tests' projects << 'libs:elasticsearch-nio-tests' } @@ -128,6 +129,10 @@ if (isEclipse) { project(":server").buildFileName = 'eclipse-build.gradle' project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') project(":server-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/main') + project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/test') + project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' project(":libs:elasticsearch-nio").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/main') project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test') From 67c1f1c856cad9624087931e7ca1285e16cd55f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 16 Jan 2018 12:05:03 +0100 Subject: [PATCH 30/30] [Docs] Fix Java API index administration usage (#28133) The Java API documentation for index administration is currently wrong because the PutMappingRequestBuilder#setSource(Object... source) and CreateIndexRequestBuilder#addMapping(String type, Object... source) methods delegate to methods that check that the input arguments are valid key/value pairs: https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-admin-indices.html This changes the docs so that the Java API code examples are included from documentation integration tests, which lets us detect compile and runtime issues earlier.
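To illustrate the failure mode described above, a hypothetical snippet (not part of the patch): a lone JSON string handed to the varargs overload trips the key/value validation at runtime, while the (String, XContentType) overload used by the corrected examples does not.

[source,java]
--------------------------------------------------
// Broken pattern from the old docs: the single String is taken as varargs and
// rejected, because setSource(Object...) expects alternating field/value pairs.
client.admin().indices().preparePutMapping("twitter")
    .setType("user")
    .setSource("{\"properties\":{\"name\":{\"type\":\"text\"}}}")
    .get();

// Working pattern used by the corrected examples: pass the content type explicitly.
client.admin().indices().preparePutMapping("twitter")
    .setType("user")
    .setSource("{\"properties\":{\"name\":{\"type\":\"text\"}}}", XContentType.JSON)
    .get();
--------------------------------------------------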
Closes #28131 --- .../admin/indices/put-mapping.asciidoc | 57 +++------------- .../admin/indices/create/CreateIndexIT.java | 68 +++++++++++++++++++ 2 files changed, 77 insertions(+), 48 deletions(-) diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc index e52c66d96c3bb..97cfcf589b9d8 100644 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ b/docs/java-api/admin/indices/put-mapping.asciidoc @@ -1,21 +1,13 @@ [[java-admin-indices-put-mapping]] +:base-dir: {docdir}/../../server/src/test/java/org/elasticsearch/action/admin/indices/create + ==== Put Mapping The PUT mapping API allows you to add a new type while creating an index: -[source,java] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -client.admin().indices().prepareCreate("twitter") <1> - .addMapping("tweet", "{\n" + <2> - " \"tweet\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - " }") - .get(); +include-tagged::{base-dir}/CreateIndexIT.java[addMapping-create-index-request] -------------------------------------------------- <1> <<java-admin-indices-create-index,Creates an index>> called `twitter` <2> It also adds a `tweet` mapping type. @@ -23,32 +15,9 @@ client.admin().indices().prepareCreate("twitter") <1> The PUT mapping API also allows you to add a new type to an existing index: -[source,java] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -client.admin().indices().preparePutMapping("twitter") <1> - .setType("user") <2> - .setSource("{\n" + <3> - " \"properties\": {\n" + - " \"name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - "}") - .get(); - -// You can also provide the type in the source document -client.admin().indices().preparePutMapping("twitter") - .setType("user") - .setSource("{\n" + - " \"user\":{\n" + <4> - " \"properties\": {\n" + - " \"name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}") - .get(); +include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source] -------------------------------------------------- <1> Puts a mapping on an existing index called `twitter` <2> Adds a `user` mapping type. You can use the same API to update an existing mapping: -[source,java] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -client.admin().indices().preparePutMapping("twitter") <1> - .setType("user") <2> - .setSource("{\n" + <3> - " \"properties\": {\n" + - " \"user_name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - "}") - .get(); +include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source-append] -------------------------------------------------- <1> Puts a mapping on an existing index called `twitter` <2> Updates the `user` mapping type.
<3> This `user` now has a new field `user_name` +:base-dir!: \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 14d6647071453..2ebb84ef92a72 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.create; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; @@ -28,6 +29,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -35,6 +37,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -400,4 +403,69 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertThat(e, hasToString(containsString("unknown setting [index.foo]"))); } + /** + * This test method is used to generate the Put Mapping Java Indices API documentation + * at "docs/java-api/admin/indices/put-mapping.asciidoc", so the documentation gets tested + * and is verified to compile and run without throwing errors at runtime.
+ */ + public void testPutMappingDocumentation() throws Exception { + Client client = client(); + // tag::addMapping-create-index-request + client.admin().indices().prepareCreate("twitter") // <1> + .addMapping("tweet", "{\n" + // <2> + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + " }", XContentType.JSON) + .get(); + // end::addMapping-create-index-request + + // we need to delete in order to create a fresh new index with another type + client.admin().indices().prepareDelete("twitter").get(); + client.admin().indices().prepareCreate("twitter").get(); + + // tag::putMapping-request-source + client.admin().indices().preparePutMapping("twitter") // <1> + .setType("user") // <2> + .setSource("{\n" + // <3> + " \"properties\": {\n" + + " \"name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}", XContentType.JSON) + .get(); + + // You can also provide the type in the source document + client.admin().indices().preparePutMapping("twitter") + .setType("user") + .setSource("{\n" + + " \"user\":{\n" + // <4> + " \"properties\": {\n" + + " \"name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}", XContentType.JSON) + .get(); + // end::putMapping-request-source + + // tag::putMapping-request-source-append + client.admin().indices().preparePutMapping("twitter") // <1> + .setType("user") // <2> + .setSource("{\n" + // <3> + " \"properties\": {\n" + + " \"user_name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}", XContentType.JSON) + .get(); + // end::putMapping-request-source-append + } }