From 509978012b9e50b73eb31829382b1eebf686c799 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 25 May 2022 16:27:17 -0700 Subject: [PATCH 01/34] Fix testSetAdditionalRolesCanAddDeprecatedMasterRole() by removing the initial assertion (#3441) Signed-off-by: Tianli Feng --- .../java/org/opensearch/cluster/node/DiscoveryNodeTests.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 3a058a282be9c..1b7f698ae1f5c 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -179,9 +179,6 @@ public void testDiscoveryNodeIsRemoteClusterClientUnset() { // as a workaround, since the new CLUSTER_MANAGER_ROLE has the same abbreviation 'm'. // The test validates this behavior. public void testSetAdditionalRolesCanAddDeprecatedMasterRole() { - // Validate MASTER_ROLE is not in DiscoveryNodeRole.BUILT_IN_ROLES - assertFalse(DiscoveryNode.getPossibleRoleNames().contains(DiscoveryNodeRole.MASTER_ROLE.roleName())); - DiscoveryNode.setAdditionalRoles(Collections.emptySet()); assertTrue(DiscoveryNode.getPossibleRoleNames().contains(DiscoveryNodeRole.MASTER_ROLE.roleName())); } From 296fa092c25801eec7f9aa26088cd122305cb23a Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 25 May 2022 16:44:23 -0700 Subject: [PATCH 02/34] Replace internal usages of 'master' term in 'server/src/test' directory (#2520) * Replace the non-inclusive terminology "master" with "cluster manager" in code comments, internal variable/method/class names, in `server/src/test` directory. * Backwards compatibility is not impacted.
* Add a new unit test `testDeprecatedMasterNodeFilter()` to validate using `master:true` or `master:false` can filter the node in [Cluster Stats](https://opensearch.org/docs/latest/opensearch/rest-api/cluster-stats/) API, after the `master` role is deprecated in PR https://github.com/opensearch-project/OpenSearch/pull/2424 Signed-off-by: Tianli Feng --- .../discovery/ClusterDisruptionIT.java | 2 +- .../discovery/ClusterManagerDisruptionIT.java | 10 +- .../discovery/DiscoveryDisruptionIT.java | 8 +- ...tAddVotingConfigExclusionsActionTests.java | 8 +- .../health/ClusterHealthResponsesTests.java | 4 +- .../node/tasks/CancellableTasksTests.java | 4 +- .../node/tasks/TaskManagerTestCase.java | 4 +- .../reroute/ClusterRerouteRequestTests.java | 2 +- .../TransportMultiSearchActionTests.java | 2 +- .../TransportBroadcastByNodeActionTests.java | 34 +-- .../TransportMasterNodeActionTests.java | 55 +++-- .../TransportMasterNodeActionUtils.java | 8 +- .../cluster/ClusterChangedEventTests.java | 37 ++-- .../opensearch/cluster/ClusterStateTests.java | 41 ++-- ...rnalClusterInfoServiceSchedulingTests.java | 58 ++--- .../action/shard/ShardStateActionTests.java | 61 +++--- .../ClusterBootstrapServiceTests.java | 6 +- .../ClusterFormationFailureHelperTests.java | 28 ++- .../coordination/CoordinationStateTests.java | 6 +- .../coordination/CoordinatorTests.java | 42 ++-- .../coordination/FollowersCheckerTests.java | 2 +- .../coordination/JoinTaskExecutorTests.java | 15 +- .../NoMasterBlockServiceTests.java | 18 +- .../cluster/coordination/NodeJoinTests.java | 111 +++++----- .../coordination/PublicationTests.java | 2 +- .../coordination/ReconfiguratorTests.java | 14 +- .../health/ClusterHealthAllocationTests.java | 7 +- .../health/ClusterStateHealthTests.java | 6 +- .../metadata/AutoExpandReplicasTests.java | 4 +- .../cluster/node/DiscoveryNodesTests.java | 58 +++-- .../routing/BatchedRerouteServiceTests.java | 6 +- .../routing/OperationRoutingTests.java | 6 +- .../allocation/FailedNodeRoutingTests.java | 2 +- .../allocation/InSyncAllocationIdTests.java | 6 +- .../decider/DiskThresholdDeciderTests.java | 10 +- ...storeInProgressAllocationDeciderTests.java | 6 +- .../service/ClusterApplierServiceTests.java | 18 +- .../cluster/service/MasterServiceTests.java | 105 ++++----- .../discovery/AbstractDisruptionTestCase.java | 34 +-- .../discovery/DiscoveryModuleTests.java | 6 +- ...shakingTransportAddressConnectorTests.java | 4 +- .../opensearch/discovery/PeerFinderTests.java | 52 ++--- .../opensearch/env/NodeEnvironmentTests.java | 26 +-- .../env/NodeRepurposeCommandTests.java | 75 ++++--- .../gateway/AsyncShardFetchTests.java | 2 +- .../gateway/GatewayServiceTests.java | 4 +- .../IncrementalClusterStateWriterTests.java | 36 +-- .../index/seqno/ReplicationTrackerTests.java | 12 +- .../indices/IndicesServiceTests.java | 2 +- .../indices/cluster/ClusterStateChanges.java | 7 +- ...ClusterStateServiceRandomUpdatesTests.java | 14 +- .../PersistentTasksClusterServiceTests.java | 14 +- .../cluster/RestNodesInfoActionTests.java | 4 +- .../InternalSnapshotsInfoServiceTests.java | 10 +- .../snapshots/SnapshotResiliencyTests.java | 207 +++++++++--------- .../snapshots/SnapshotsServiceTests.java | 8 +- .../MockEventuallyConsistentRepository.java | 2 +- .../transport/RemoteClusterServiceTests.java | 6 +- .../SniffConnectionStrategyTests.java | 20 +- 59 files changed, 736 insertions(+), 625 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java 
b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 7a8b6b447a68d..915aef5cb1d25 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -386,7 +386,7 @@ public void onFailure(Exception e) { ); if (isolatedNode.equals(nonClusterManagerNode)) { - assertNoMaster(nonClusterManagerNode); + assertNoClusterManager(nonClusterManagerNode); } else { ensureStableCluster(2, nonClusterManagerNode); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 4515e0828be2e..61f50ace17b62 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -91,7 +91,7 @@ public void testClusterManagerNodeGCs() throws Exception { logger.info("waiting for nodes to de-elect cluster-manager [{}]", oldClusterManagerNode); for (String node : oldNonClusterManagerNodesSet) { - assertDifferentMaster(node, oldClusterManagerNode); + assertDifferentClusterManager(node, oldClusterManagerNode); } logger.info("waiting for nodes to elect a new cluster-manager"); @@ -107,7 +107,7 @@ public void testClusterManagerNodeGCs() throws Exception { // make sure all nodes agree on cluster-manager String newClusterManager = internalCluster().getMasterName(); assertThat(newClusterManager, not(equalTo(oldClusterManagerNode))); - assertMaster(newClusterManager, nodes); + assertClusterManager(newClusterManager, nodes); } /** @@ -137,7 +137,7 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc ensureStableCluster(2, nonIsolatedNode); // make sure the isolated node picks up on things. - assertNoMaster(isolatedNode, TimeValue.timeValueSeconds(40)); + assertNoClusterManager(isolatedNode, TimeValue.timeValueSeconds(40)); // restore isolation networkDisruption.stopDisrupting(); @@ -227,7 +227,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. However // It may take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30)); logger.info("wait until the elected cluster-manager has been removed and a new 2-node cluster has formed (via [{}])", isolatedNode); ensureStableCluster(2, nonIsolatedNode); @@ -273,7 +273,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved.
However // It may take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node // the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index a4667d62a878c..c6e4d95449d42 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -81,7 +81,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { ); nonClusterManagerTransportService.addFailToSendNoConnectRule(clusterManagerTranspotService); - assertNoMaster(nonClusterManagerNode); + assertNoClusterManager(nonClusterManagerNode); logger.info( "blocking cluster state publishing from cluster-manager [{}] to non cluster-manager [{}]", @@ -166,7 +166,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception { logger.info("--> forcing a complete election to make sure \"preferred\" cluster-manager is elected"); isolateAllNodes.startDisrupting(); for (String node : nodes) { - assertNoMaster(node); + assertNoClusterManager(node); } internalCluster().clearDisruptionScheme(); ensureStableCluster(3); @@ -194,7 +194,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception { logger.info("--> forcing a complete election again"); isolateAllNodes.startDisrupting(); for (String node : nodes) { - assertNoMaster(node); + assertNoClusterManager(node); } isolateAllNodes.stopDisrupting(); @@ -242,7 +242,7 @@ public void testNodeNotReachableFromClusterManager() throws Exception { ensureStableCluster(2, clusterManagerNode); logger.info("waiting for [{}] to have no cluster-manager", nonClusterManagerNode); - assertNoMaster(nonClusterManagerNode); + assertNoClusterManager(nonClusterManagerNode); logger.info("healing partition and checking cluster reforms"); clusterManagerTransportService.clearAllRules(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index bff0689a153b3..dfd6d059cc3a8 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -252,7 +252,7 @@ public void testWithdrawsVotesFromNodesMatchingWildcard() throws InterruptedExce assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } - public void testWithdrawsVotesFromAllMasterEligibleNodes() throws InterruptedException { + public void testWithdrawsVotesFromAllClusterManagerEligibleNodes() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(2);
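// A reading aid, not part of the original patch: the latch of two is assumed to be released
// once by the AdjustConfigurationForExclusions observer below (after the voting configuration
// has been adjusted for the exclusions) and once by the request's response handler, so the
// test proceeds only after both sides have completed.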
clusterStateObserver.waitForNextChange(new AdjustConfigurationForExclusions(countDownLatch)); @@ -349,14 +349,14 @@ public void testReturnsErrorIfNoMatchingNodeDescriptions() throws InterruptedExc assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } - public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException { + public void testOnlyMatchesClusterManagerEligibleNodes() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); final SetOnce exceptionHolder = new SetOnce<>(); transportService.sendRequest( localNode, AddVotingConfigExclusionsAction.NAME, - makeRequestWithNodeDescriptions("_all", "master:false"), + makeRequestWithNodeDescriptions("_all", "cluster_manager:false"), expectError(e -> { exceptionHolder.set(e); countDownLatch.countDown(); @@ -368,7 +368,7 @@ public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException { assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertThat( rootCause.getMessage(), - equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes") + equalTo("add voting config exclusions request for [_all, cluster_manager:false] matched no cluster-manager-eligible nodes") ); assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 844dfe9c6c00f..b33f5c7bd5bc7 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -110,9 +110,9 @@ public void testClusterHealth() throws IOException { assertThat(clusterHealth.getActiveShardsPercent(), is(allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0)))); } - public void testClusterHealthVerifyMasterNodeDiscovery() throws IOException { + public void testClusterHealthVerifyClusterManagerNodeDiscovery() throws IOException { DiscoveryNode localNode = new DiscoveryNode("node", OpenSearchTestCase.buildNewFakeTransportAddress(), Version.CURRENT); - // set the node information to verify master_node discovery in ClusterHealthResponse + // set the node information to verify cluster_manager_node discovery in ClusterHealthResponse ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .build(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index d3be6170526fc..5b2b4f361083b 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -474,12 +474,12 @@ public void onFailure(Exception e) { for (int i = 1; i < testNodes.length; i++) { discoveryNodes[i - 1] = testNodes[i].discoveryNode(); } - DiscoveryNode master = discoveryNodes[0]; + DiscoveryNode clusterManager = discoveryNodes[0]; for (int i = 1; i < testNodes.length; i++) { // Notify only nodes that should remain in the cluster setState( testNodes[i].clusterService, - 
ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), master, discoveryNodes) + ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), clusterManager, discoveryNodes) ); } if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index c8411b31e0709..4383b21aa7e74 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -257,9 +257,9 @@ public static void connectNodes(TestNode... nodes) { for (int i = 0; i < nodes.length; i++) { discoveryNodes[i] = nodes[i].discoveryNode(); } - DiscoveryNode master = discoveryNodes[0]; + DiscoveryNode clusterManager = discoveryNodes[0]; for (TestNode node : nodes) { - setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), master, discoveryNodes)); + setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), clusterManager, discoveryNodes)); } for (TestNode nodeA : nodes) { for (TestNode nodeB : nodes) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index 6f62883ff436c..d48eb1619d36c 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -161,7 +161,7 @@ public void testEqualsAndHashCode() { assertEquals(request, copy); assertEquals(request.hashCode(), copy.hashCode()); - // Changing masterNodeTime makes requests not equal + // Changing clusterManagerNodeTimeout makes requests not equal copy.masterNodeTimeout(timeValueMillis(request.masterNodeTimeout().millis() + 1)); assertNotEquals(request, copy); assertNotEquals(request.hashCode(), copy.hashCode()); diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 09ab2438bd106..5fd5e7315e553 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -260,7 +260,7 @@ public void testDefaultMaxConcurrentSearches() { } builder.add( new DiscoveryNode( - "master", + "cluster_manager", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 2830a42dfae76..930fe4ad6049d 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -366,17 +366,17 @@ public void testOneRequestIsSentToEachNodeHoldingAShard() { } } - // simulate the master being removed from the cluster but before a new master is elected - // as such, the shards assigned to the master will still show up in the cluster state as 
assigned to a node but - // that node will not be in the local cluster state on any node that has detected the master as failing + // simulate the cluster-manager being removed from the cluster but before a new cluster-manager is elected + // as such, the shards assigned to the cluster-manager will still show up in the cluster state as assigned to a node but + // that node will not be in the local cluster state on any node that has detected the cluster-manager as failing // in this case, such a shard should be treated as unassigned - public void testRequestsAreNotSentToFailedMaster() { + public void testRequestsAreNotSentToFailedClusterManager() { Request request = new Request(new String[] { TEST_INDEX }); PlainActionFuture listener = new PlainActionFuture<>(); - DiscoveryNode masterNode = clusterService.state().nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = clusterService.state().nodes().getMasterNode(); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); - builder.remove(masterNode.getId()); + builder.remove(clusterManagerNode.getId()); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); @@ -384,11 +384,11 @@ public void testRequestsAreNotSentToFailedMaster() { Map> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear(); - // the master should not be in the list of nodes that requests were sent to + // the cluster manager should not be in the list of nodes that requests were sent to ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[] { TEST_INDEX }); Set set = new HashSet<>(); for (ShardRouting shard : shardIt) { - if (!shard.currentNodeId().equals(masterNode.getId())) { + if (!shard.currentNodeId().equals(clusterManagerNode.getId())) { set.add(shard.currentNodeId()); } } @@ -399,7 +399,7 @@ public void testRequestsAreNotSentToFailedMaster() { // check requests were sent to the right nodes assertEquals(set, capturedRequests.keySet()); for (Map.Entry> entry : capturedRequests.entrySet()) { - // check one request was sent to each non-master node + // check one request was sent to each non-cluster-manager node assertEquals(1, entry.getValue().size()); } } @@ -456,13 +456,13 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept Request request = new Request(new String[] { TEST_INDEX }); PlainActionFuture listener = new PlainActionFuture<>(); - // simulate removing the master - final boolean simulateFailedMasterNode = rarely(); - DiscoveryNode failedMasterNode = null; - if (simulateFailedMasterNode) { - failedMasterNode = clusterService.state().nodes().getMasterNode(); + // simulate removing the cluster-manager + final boolean simulateFailedClusterManagerNode = rarely(); + DiscoveryNode failedClusterManagerNode = null; + if (simulateFailedClusterManagerNode) { + failedClusterManagerNode = clusterService.state().nodes().getMasterNode(); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); - builder.remove(failedMasterNode.getId()); + builder.remove(failedClusterManagerNode.getId()); builder.masterNodeId(null); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); @@ -511,8 +511,8 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept transport.handleResponse(requestId, nodeResponse); } } - if (simulateFailedMasterNode) { - totalShards += map.get(failedMasterNode.getId()).size(); + if (simulateFailedClusterManagerNode) { + 
totalShards += map.get(failedClusterManagerNode.getId()).size(); } Response response = listener.get(); diff --git a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java index 1dd44f3186657..512749346588e 100644 --- a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java @@ -240,7 +240,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException { - final boolean masterOperationFailure = randomBoolean(); + final boolean clusterManagerOperationFailure = randomBoolean(); Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -253,7 +253,7 @@ public void testLocalOperationWithoutBlocks() throws ExecutionException, Interru new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { - if (masterOperationFailure) { + if (clusterManagerOperationFailure) { listener.onFailure(exception); } else { listener.onResponse(response); @@ -262,7 +262,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A }.execute(request, listener); assertTrue(listener.isDone()); - if (masterOperationFailure) { + if (clusterManagerOperationFailure) { try { listener.get(); fail("Expected exception but returned proper result"); @@ -376,7 +376,7 @@ protected boolean localExecute(Request request) { listener.get(); } - public void testMasterNotAvailable() throws ExecutionException, InterruptedException { + public void testClusterManagerNotAvailable() throws ExecutionException, InterruptedException { Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0)); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -385,7 +385,7 @@ public void testMasterNotAvailable() throws ExecutionException, InterruptedExcep assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class); } - public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException { + public void testClusterManagerBecomesAvailable() throws ExecutionException, InterruptedException { Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -396,7 +396,7 @@ public void testMasterBecomesAvailable() throws ExecutionException, InterruptedE listener.get(); } - public void testDelegateToMaster() throws ExecutionException, InterruptedException { + public void testDelegateToClusterManager() throws ExecutionException, InterruptedException { Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); @@ -415,15 +415,15 @@ public void testDelegateToMaster() throws ExecutionException, InterruptedExcepti assertThat(listener.get(), equalTo(response)); } - public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException { + public void testDelegateToFailingClusterManager() throws ExecutionException, 
InterruptedException { boolean failsWithConnectTransportException = randomBoolean(); - boolean rejoinSameMaster = failsWithConnectTransportException && randomBoolean(); + boolean rejoinSameClusterManager = failsWithConnectTransportException && randomBoolean(); Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0)); - DiscoveryNode masterNode = this.remoteNode; + DiscoveryNode clusterManagerNode = this.remoteNode; setState( clusterService, // use a random base version so it can go down when simulating a restart. - ClusterState.builder(ClusterStateCreationUtils.state(localNode, masterNode, allNodes)).version(randomIntBetween(0, 10)) + ClusterState.builder(ClusterStateCreationUtils.state(localNode, clusterManagerNode, allNodes)).version(randomIntBetween(0, 10)) ); PlainActionFuture listener = new PlainActionFuture<>(); @@ -436,14 +436,16 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); - if (rejoinSameMaster) { + if (rejoinSameClusterManager) { transport.handleRemoteError( capturedRequest.requestId, - randomBoolean() ? new ConnectTransportException(masterNode, "Fake error") : new NodeClosedException(masterNode) + randomBoolean() + ? new ConnectTransportException(clusterManagerNode, "Fake error") + : new NodeClosedException(clusterManagerNode) ); assertFalse(listener.isDone()); if (randomBoolean()) { - // simulate master node removal + // simulate cluster-manager node removal final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); nodesBuilder.masterNodeId(null); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); @@ -452,15 +454,19 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted // reset the same state to increment a version simulating a join of an existing node // simulating use being disconnected final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - nodesBuilder.masterNodeId(masterNode.getId()); + nodesBuilder.masterNodeId(clusterManagerNode.getId()); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); } else { - // simulate master restart followed by a state recovery - this will reset the cluster state version + // simulate cluster-manager restart followed by a state recovery - this will reset the cluster state version final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - nodesBuilder.remove(masterNode); - masterNode = new DiscoveryNode(masterNode.getId(), masterNode.getAddress(), masterNode.getVersion()); - nodesBuilder.add(masterNode); - nodesBuilder.masterNodeId(masterNode.getId()); + nodesBuilder.remove(clusterManagerNode); + clusterManagerNode = new DiscoveryNode( + clusterManagerNode.getId(), + clusterManagerNode.getAddress(), + clusterManagerNode.getVersion() + ); + nodesBuilder.add(clusterManagerNode); + nodesBuilder.masterNodeId(clusterManagerNode.getId()); final ClusterState.Builder builder = ClusterState.builder(clusterService.state()).nodes(nodesBuilder); setState(clusterService, builder.version(0)); } @@ -472,7 +478,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, 
equalTo("internal:testAction")); } else if (failsWithConnectTransportException) { - transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(masterNode, "Fake error")); + transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(clusterManagerNode, "Fake error")); assertFalse(listener.isDone()); setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); @@ -495,7 +501,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted } } - public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException { + public void testClusterManagerFailoverAfterStepDown() throws ExecutionException, InterruptedException { Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -506,7 +512,8 @@ public void testMasterFailoverAfterStepDown() throws ExecutionException, Interru new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { - // The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery + // The other node has become cluster-manager, simulate failures of this node while publishing cluster state through + // ZenDiscovery setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); Exception failure = randomBoolean() ? new FailedToCommitClusterStateException("Fake error") @@ -526,8 +533,8 @@ protected void masterOperation(Request request, ClusterState state, ActionListen assertThat(listener.get(), equalTo(response)); } - // Validate TransportMasterNodeAction.testDelegateToMaster() works correctly on node with the deprecated MASTER_ROLE. - public void testDelegateToMasterOnNodeWithDeprecatedMasterRole() throws ExecutionException, InterruptedException { + // Validate TransportMasterNodeAction.testDelegateToClusterManager() works correctly on node with the deprecated MASTER_ROLE. + public void testDelegateToClusterManagerOnNodeWithDeprecatedMasterRole() throws ExecutionException, InterruptedException { DiscoveryNode localNode = new DiscoveryNode( "local_node", buildNewFakeTransportAddress(), diff --git a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java index d1faeccc83ac4..391103eb5cebd 100644 --- a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java @@ -42,13 +42,13 @@ public class TransportMasterNodeActionUtils { * Allows to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is * a protected method. 
*/ - public static , Response extends ActionResponse> void runMasterOperation( - TransportMasterNodeAction masterNodeAction, + public static , Response extends ActionResponse> void runClusterManagerOperation( + TransportMasterNodeAction clusterManagerNodeAction, Request request, ClusterState clusterState, ActionListener actionListener ) throws Exception { - assert masterNodeAction.checkBlock(request, clusterState) == null; - masterNodeAction.masterOperation(request, clusterState, actionListener); + assert clusterManagerNodeAction.checkBlock(request, clusterState) == null; + clusterManagerNodeAction.masterOperation(request, clusterState, actionListener); } } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java index e0a12fc1d312b..16f21a48d7ab8 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java @@ -107,19 +107,19 @@ public void testBasicProperties() { } /** - * Test whether the ClusterChangedEvent returns the correct value for whether the local node is master, + * Test whether the ClusterChangedEvent returns the correct value for whether the local node is cluster-manager, * based on what was set on the cluster state. */ - public void testLocalNodeIsMaster() { + public void testLocalNodeIsClusterManager() { final int numNodesInCluster = 3; ClusterState previousState = createSimpleClusterState(); ClusterState newState = createState(numNodesInCluster, true, initialIndices); ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); - assertTrue("local node should be master", event.localNodeMaster()); + assertTrue("local node should be cluster-manager", event.localNodeMaster()); newState = createState(numNodesInCluster, false, initialIndices); event = new ClusterChangedEvent("_na_", newState, previousState); - assertFalse("local node should not be master", event.localNodeMaster()); + assertFalse("local node should not be cluster-manager", event.localNodeMaster()); } /** @@ -314,8 +314,8 @@ public void testChangedCustomMetadataSet() { assertTrue(changedCustomMetadataTypeSet.contains(customMetadata1.getWriteableName())); } - // Validate the above test case testLocalNodeIsMaster() passes when the deprecated 'master' role is assigned to the local node. - public void testLocalNodeIsMasterWithDeprecatedMasterRole() { + // Validate the above test case testLocalNodeIsClusterManager() passes when the deprecated 'master' role is assigned to the local node. 
+ public void testLocalNodeIsClusterManagerWithDeprecatedMasterRole() { final DiscoveryNodes.Builder builderLocalIsMaster = DiscoveryNodes.builder(); final DiscoveryNode node0 = newNode("node_0", Set.of(DiscoveryNodeRole.MASTER_ROLE)); final DiscoveryNode node1 = newNode("node_1", Set.of(DiscoveryNodeRole.DATA_ROLE)); @@ -390,18 +390,18 @@ private static ClusterState createSimpleClusterState() { } // Create a basic cluster state with a given set of indices - private static ClusterState createState(final int numNodes, final boolean isLocalMaster, final List indices) { + private static ClusterState createState(final int numNodes, final boolean isLocalClusterManager, final List indices) { final Metadata metadata = createMetadata(indices); return ClusterState.builder(TEST_CLUSTER_NAME) - .nodes(createDiscoveryNodes(numNodes, isLocalMaster)) + .nodes(createDiscoveryNodes(numNodes, isLocalClusterManager)) .metadata(metadata) .routingTable(createRoutingTable(1, metadata)) .build(); } // Create a non-initialized cluster state - private static ClusterState createNonInitializedState(final int numNodes, final boolean isLocalMaster) { - final ClusterState withoutBlock = createState(numNodes, isLocalMaster, Collections.emptyList()); + private static ClusterState createNonInitializedState(final int numNodes, final boolean isLocalClusterManager) { + final ClusterState withoutBlock = createState(numNodes, isLocalClusterManager, Collections.emptyList()); return ClusterState.builder(withoutBlock) .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build()) .build(); @@ -463,28 +463,29 @@ private static ClusterState nextState( } // Create the discovery nodes for a cluster state. For our testing purposes, we want - // the first to be master, the second to be master eligible, the third to be a data node, - // and the remainder can be any kinds of nodes (master eligible, data, or both). - private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalMaster) { + // the first to be cluster-manager, the second to be cluster-manager eligible, the third to be a data node, + // and the remainder can be any kinds of nodes (cluster-manager eligible, data, or both). + private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalClusterManager) { assert (numNodes >= 3) : "the initial cluster state for event change tests should have a minimum of 3 nodes " - + "so there are a minimum of 2 master nodes for testing master change events."; + + "so there are a minimum of 2 cluster-manager nodes for testing cluster-manager change events."; final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - final int localNodeIndex = isLocalMaster ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not master + final int localNodeIndex = isLocalClusterManager ? 
0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not + // cluster-manager for (int i = 0; i < numNodes; i++) { final String nodeId = NODE_ID_PREFIX + i; Set roles = new HashSet<>(); if (i == 0) { - // the master node + // the cluster-manager node builder.masterNodeId(nodeId); roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else if (i == 1) { - // the alternate master node + // the alternate cluster-manager node roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else if (i == 2) { // we need at least one data node roles.add(DiscoveryNodeRole.DATA_ROLE); } else { - // remaining nodes can be anything (except for master) + // remaining nodes can be anything (except for cluster-manager) if (randomBoolean()) { roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 8904e4391a89f..3155954d020a4 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -86,31 +86,34 @@ public void testSupersedes() { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version); final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); - ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); - ClusterState noMaster2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); - ClusterState withMaster1a = ClusterState.builder(name) + ClusterState noClusterManager1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); + ClusterState noClusterManager2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); + ClusterState withClusterManager1a = ClusterState.builder(name) .version(randomInt(5)) .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())) .build(); - ClusterState withMaster1b = ClusterState.builder(name) + ClusterState withClusterManager1b = ClusterState.builder(name) .version(randomInt(5)) .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())) .build(); - ClusterState withMaster2 = ClusterState.builder(name) + ClusterState withClusterManager2 = ClusterState.builder(name) .version(randomInt(5)) .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId())) .build(); // states with no cluster-manager should never supersede anything - assertFalse(noMaster1.supersedes(noMaster2)); - assertFalse(noMaster1.supersedes(withMaster1a)); + assertFalse(noClusterManager1.supersedes(noClusterManager2)); + assertFalse(noClusterManager1.supersedes(withClusterManager1a)); - // states should never supersede states from another master - assertFalse(withMaster1a.supersedes(withMaster2)); - assertFalse(withMaster1a.supersedes(noMaster1)); + // states should never supersede states from another cluster-manager + assertFalse(withClusterManager1a.supersedes(withClusterManager2)); + assertFalse(withClusterManager1a.supersedes(noClusterManager1)); - // state from the same master compare by version - assertThat(withMaster1a.supersedes(withMaster1b), equalTo(withMaster1a.version() > withMaster1b.version())); + // state from the same cluster-manager compare by version + assertThat( + withClusterManager1a.supersedes(withClusterManager1b), + equalTo(withClusterManager1a.version() > 
withClusterManager1b.version()) + ); } public void testBuilderRejectsNullCustom() { @@ -146,8 +149,8 @@ public void testToXContent() throws IOException { + " \"cluster_uuid\" : \"clusterUUID\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"stateUUID\",\n" - + " \"master_node\" : \"masterNodeId\",\n" - + " \"cluster_manager_node\" : \"masterNodeId\",\n" + + " \"master_node\" : \"clusterManagerNodeId\",\n" + + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n" + " \"blocks\" : {\n" + " \"global\" : {\n" + " \"1\" : {\n" @@ -352,8 +355,8 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " \"cluster_uuid\" : \"clusterUUID\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"stateUUID\",\n" - + " \"master_node\" : \"masterNodeId\",\n" - + " \"cluster_manager_node\" : \"masterNodeId\",\n" + + " \"master_node\" : \"clusterManagerNodeId\",\n" + + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n" + " \"blocks\" : {\n" + " \"global\" : {\n" + " \"1\" : {\n" @@ -551,8 +554,8 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " \"cluster_uuid\" : \"clusterUUID\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"stateUUID\",\n" - + " \"master_node\" : \"masterNodeId\",\n" - + " \"cluster_manager_node\" : \"masterNodeId\",\n" + + " \"master_node\" : \"clusterManagerNodeId\",\n" + + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n" + " \"blocks\" : {\n" + " \"global\" : {\n" + " \"1\" : {\n" @@ -868,7 +871,7 @@ private ClusterState buildClusterState() throws IOException { .stateUUID("stateUUID") .nodes( DiscoveryNodes.builder() - .masterNodeId("masterNodeId") + .masterNodeId("clusterManagerNodeId") .add(new DiscoveryNode("nodeId1", new TransportAddress(InetAddress.getByName("127.0.0.1"), 111), Version.CURRENT)) .build() ) diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index c6279d0029009..251703a933525 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -68,8 +68,8 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas public void testScheduling() { final DiscoveryNode discoveryNode = new DiscoveryNode("test", buildNewFakeTransportAddress(), Version.CURRENT); - final DiscoveryNodes noMaster = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build(); - final DiscoveryNodes localMaster = DiscoveryNodes.builder(noMaster).masterNodeId(discoveryNode.getId()).build(); + final DiscoveryNodes noClusterManager = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build(); + final DiscoveryNodes localClusterManager = DiscoveryNodes.builder(noClusterManager).masterNodeId(discoveryNode.getId()).build(); final Settings.Builder settingsBuilder = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), discoveryNode.getName()); if (randomBoolean()) { @@ -87,14 +87,14 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { } }; - final MasterService masterService = new FakeThreadPoolMasterService( + final MasterService clusterManagerService = new FakeThreadPoolMasterService( "test", - "masterService", + "clusterManagerService", threadPool, - r -> { fail("master service should not run any tasks"); } + r -> { 
fail("cluster-manager service should not run any tasks"); } ); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); + final ClusterService clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); final FakeClusterInfoServiceClient client = new FakeClusterInfoServiceClient(threadPool); final InternalClusterInfoService clusterInfoService = new InternalClusterInfoService(settings, clusterService, threadPool, client); @@ -102,34 +102,34 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { clusterInfoService.addListener(ignored -> {}); clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService()); - clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build()); - masterService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); - masterService.setClusterStateSupplier(clusterApplierService::state); + clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build()); + clusterManagerService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); + clusterManagerService.setClusterStateSupplier(clusterApplierService::state); clusterService.start(); - final AtomicBoolean becameMaster1 = new AtomicBoolean(); + final AtomicBoolean becameClusterManager1 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "become master 1", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(), - setFlagOnSuccess(becameMaster1) + "become cluster-manager 1", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(localClusterManager).build(), + setFlagOnSuccess(becameClusterManager1) ); - runUntilFlag(deterministicTaskQueue, becameMaster1); + runUntilFlag(deterministicTaskQueue, becameClusterManager1); - final AtomicBoolean failMaster1 = new AtomicBoolean(); + final AtomicBoolean failClusterManager1 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "fail master 1", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(), - setFlagOnSuccess(failMaster1) + "fail cluster-manager 1", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build(), + setFlagOnSuccess(failClusterManager1) ); - runUntilFlag(deterministicTaskQueue, failMaster1); + runUntilFlag(deterministicTaskQueue, failClusterManager1); - final AtomicBoolean becameMaster2 = new AtomicBoolean(); + final AtomicBoolean becameClusterManager2 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "become master 2", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(), - setFlagOnSuccess(becameMaster2) + "become cluster-manager 2", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(localClusterManager).build(), + setFlagOnSuccess(becameClusterManager2) ); - runUntilFlag(deterministicTaskQueue, becameMaster2); + runUntilFlag(deterministicTaskQueue, becameClusterManager2); for (int i = 0; i < 3; i++) { final int initialRequestCount = client.requestCount; @@ -139,13 +139,13 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { assertThat(client.requestCount, equalTo(initialRequestCount + 2)); // should have run two client requests per interval } - final AtomicBoolean failMaster2 
= new AtomicBoolean(); + final AtomicBoolean failClusterManager2 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "fail master 2", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(), - setFlagOnSuccess(failMaster2) + "fail cluster-manager 2", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build(), + setFlagOnSuccess(failClusterManager2) ); - runUntilFlag(deterministicTaskQueue, failMaster2); + runUntilFlag(deterministicTaskQueue, failClusterManager2); runFor(deterministicTaskQueue, INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings).millis()); deterministicTaskQueue.runAllRunnableTasks(); diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java index 744c833fa54e9..cf34af718c660 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java @@ -112,16 +112,16 @@ private static class TestShardStateAction extends ShardStateAction { super(clusterService, transportService, allocationService, rerouteService, THREAD_POOL); } - private Runnable onBeforeWaitForNewMasterAndRetry; + private Runnable onBeforeWaitForNewClusterManagerAndRetry; - public void setOnBeforeWaitForNewMasterAndRetry(Runnable onBeforeWaitForNewMasterAndRetry) { - this.onBeforeWaitForNewMasterAndRetry = onBeforeWaitForNewMasterAndRetry; + public void setOnBeforeWaitForNewClusterManagerAndRetry(Runnable onBeforeWaitForNewClusterManagerAndRetry) { + this.onBeforeWaitForNewClusterManagerAndRetry = onBeforeWaitForNewClusterManagerAndRetry; } - private Runnable onAfterWaitForNewMasterAndRetry; + private Runnable onAfterWaitForNewClusterManagerAndRetry; - public void setOnAfterWaitForNewMasterAndRetry(Runnable onAfterWaitForNewMasterAndRetry) { - this.onAfterWaitForNewMasterAndRetry = onAfterWaitForNewMasterAndRetry; + public void setOnAfterWaitFornewClusterManagerAndRetry(Runnable onAfterWaitFornewClusterManagerAndRetry) { + this.onAfterWaitForNewClusterManagerAndRetry = onAfterWaitFornewClusterManagerAndRetry; } @Override @@ -132,9 +132,9 @@ protected void waitForNewClusterManagerAndRetry( ActionListener listener, Predicate changePredicate ) { - onBeforeWaitForNewMasterAndRetry.run(); + onBeforeWaitForNewClusterManagerAndRetry.run(); super.waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); - onAfterWaitForNewMasterAndRetry.run(); + onAfterWaitForNewClusterManagerAndRetry.run(); } } @@ -160,8 +160,8 @@ public void setUp() throws Exception { transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new TestShardStateAction(clusterService, transportService, null, null); - shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {}); - shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> {}); + shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> {}); + shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> {}); } @Override @@ -196,7 +196,7 @@ public void testSuccess() throws InterruptedException { // for the right shard assertEquals(shardEntry.shardId, shardRouting.shardId()); assertEquals(shardEntry.allocationId, shardRouting.allocationId().getId()); - // sent to the master + // sent to the cluster-manager assertEquals(clusterService.state().nodes().getMasterNode().getId(), 
capturedRequests[0].node.getId()); transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); @@ -205,20 +205,20 @@ public void testSuccess() throws InterruptedException { assertNull(listener.failure.get()); } - public void testNoMaster() throws InterruptedException { + public void testNoClusterManager() throws InterruptedException { final String index = "test"; setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); - DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - noMasterBuilder.masterNodeId(null); - setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noMasterBuilder)); + DiscoveryNodes.Builder noClusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); + noClusterManagerBuilder.masterNodeId(null); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noClusterManagerBuilder)); CountDownLatch latch = new CountDownLatch(1); AtomicInteger retries = new AtomicInteger(); AtomicBoolean success = new AtomicBoolean(); - setUpMasterRetryVerification(1, retries, latch, requestId -> {}); + setUpClusterManagerRetryVerification(1, retries, latch, requestId -> {}); ShardRouting failedShard = getRandomShardRouting(index); shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ActionListener() { @@ -242,7 +242,7 @@ public void onFailure(Exception e) { assertTrue(success.get()); } - public void testMasterChannelException() throws InterruptedException { + public void testClusterManagerChannelException() throws InterruptedException { final String index = "test"; setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); @@ -268,7 +268,7 @@ public void testMasterChannelException() throws InterruptedException { }; final int numberOfRetries = randomIntBetween(1, 256); - setUpMasterRetryVerification(numberOfRetries, retries, latch, retryLoop); + setUpClusterManagerRetryVerification(numberOfRetries, retries, latch, retryLoop); ShardRouting failedShard = getRandomShardRouting(index); shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ActionListener() { @@ -413,8 +413,8 @@ public void testRemoteShardFailedConcurrently() throws Exception { } Thread[] clientThreads = new Thread[between(1, 6)]; int iterationsPerThread = scaledRandomIntBetween(50, 500); - Phaser barrier = new Phaser(clientThreads.length + 2); // one for master thread, one for the main thread - Thread masterThread = new Thread(() -> { + Phaser barrier = new Phaser(clientThreads.length + 2); // one for cluster-manager thread, one for the main thread + Thread clusterManagerThread = new Thread(() -> { barrier.arriveAndAwaitAdvance(); while (shutdown.get() == false) { for (CapturingTransport.CapturedRequest request : transport.getCapturedRequestsAndClear()) { @@ -426,7 +426,7 @@ public void testRemoteShardFailedConcurrently() throws Exception { } } }); - masterThread.start(); + clusterManagerThread.start(); AtomicInteger notifiedResponses = new AtomicInteger(); for (int t = 0; t < clientThreads.length; t++) { @@ -463,7 +463,7 @@ public void onFailure(Exception e) { } assertBusy(() -> assertThat(notifiedResponses.get(), equalTo(clientThreads.length * iterationsPerThread))); shutdown.set(true); - masterThread.join(); + clusterManagerThread.join(); } public void testShardStarted() throws InterruptedException { @@ -496,14 +496,19 @@ private ShardRouting 
getRandomShardRouting(String index) { return shardRouting; } - private void setUpMasterRetryVerification(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) { - shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> { - DiscoveryNodes.Builder masterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - masterBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId()); - setState(clusterService, ClusterState.builder(clusterService.state()).nodes(masterBuilder)); + private void setUpClusterManagerRetryVerification( + int numberOfRetries, + AtomicInteger retries, + CountDownLatch latch, + LongConsumer retryLoop + ) { + shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> { + DiscoveryNodes.Builder clusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); + clusterManagerBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId()); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(clusterManagerBuilder)); }); - shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop)); + shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop)); } private void verifyRetry(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index 812bf9425968a..b2b7c167ec7c7 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -169,7 +169,7 @@ public void testDoesNothingByDefaultIfClusterManagerNodesConfigured() { testDoesNothingWithSettings(builder().putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey())); } - public void testDoesNothingByDefaultOnMasterIneligibleNodes() { + public void testDoesNothingByDefaultOnClusterManagerIneligibleNodes() { localNode = new DiscoveryNode( "local", randomAlphaOfLength(10), @@ -401,7 +401,7 @@ public void testDoesNotBootstrapIfAlreadyBootstrapped() { deterministicTaskQueue.runAllTasks(); } - public void testDoesNotBootstrapsOnNonMasterNode() { + public void testDoesNotBootstrapsOnNonClusterManagerNode() { localNode = new DiscoveryNode( "local", randomAlphaOfLength(10), @@ -676,7 +676,7 @@ public void testFailBootstrapWithBothSingleNodeDiscoveryAndInitialClusterManager ); } - public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { + public void testFailBootstrapNonClusterManagerEligibleNodeWithSingleNodeDiscovery() { final Settings.Builder settings = Settings.builder() .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) .put(NODE_NAME_SETTING.getKey(), localNode.getName()) diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 4fb96145732a5..0a534c34b4f86 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ 
-173,7 +173,7 @@ public void testScheduling() { assertThat(logLastFailedJoinAttemptWarningCount.get(), is(5L)); } - public void testDescriptionOnMasterIneligibleNodes() { + public void testDescriptionOnClusterManagerIneligibleNodes() { final DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .version(12L) @@ -284,7 +284,7 @@ public void testDescriptionOnUnhealthyNodes() { is("this node is unhealthy: unhealthy-info") ); - final DiscoveryNode masterNode = new DiscoveryNode( + final DiscoveryNode clusterManagerNode = new DiscoveryNode( "local", buildNewFakeTransportAddress(), emptyMap(), @@ -293,7 +293,7 @@ public void testDescriptionOnUnhealthyNodes() { ); clusterState = ClusterState.builder(ClusterName.DEFAULT) .version(12L) - .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId())) + .nodes(DiscoveryNodes.builder().add(clusterManagerNode).localNodeId(clusterManagerNode.getId())) .build(); assertThat( @@ -851,9 +851,13 @@ public void testDescriptionAfterBootstrapping() { ) ); - final DiscoveryNode otherMasterNode = new DiscoveryNode("other-master", buildNewFakeTransportAddress(), Version.CURRENT); - final DiscoveryNode otherNonMasterNode = new DiscoveryNode( - "other-non-master", + final DiscoveryNode otherClusterManagerNode = new DiscoveryNode( + "other-cluster-manager", + buildNewFakeTransportAddress(), + Version.CURRENT + ); + final DiscoveryNode otherNonClusterManagerNode = new DiscoveryNode( + "other-non-cluster-manager", buildNewFakeTransportAddress(), emptyMap(), new HashSet<>( @@ -866,7 +870,13 @@ public void testDescriptionAfterBootstrapping() { String[] configNodeIds = new String[] { "n1", "n2" }; final ClusterState stateWithOtherNodes = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).add(otherMasterNode).add(otherNonMasterNode)) + .nodes( + DiscoveryNodes.builder() + .add(localNode) + .localNodeId(localNode.getId()) + .add(otherClusterManagerNode) + .add(otherNonClusterManagerNode) + ) .metadata( Metadata.builder() .coordinationMetadata( @@ -897,13 +907,13 @@ public void testDescriptionAfterBootstrapping() { + "discovery will continue using [] from hosts providers and [" + localNode + ", " - + otherMasterNode + + otherClusterManagerNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0", "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" - + otherMasterNode + + otherClusterManagerNode + ", " + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0" diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index 6ddbc909747f7..c4db0641717c6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -586,7 +586,7 @@ public void testHandlePublishRequestWithBadTerm() { ); } - // scenario when handling a publish request from a master that we already received a newer state from + // scenario when handling a publish request from a cluster-manager that we 
already received a newer state from public void testHandlePublishRequestWithSameTermButOlderOrSameVersion() { VotingConfiguration initialConfig = VotingConfiguration.of(node1); ClusterState state1 = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); @@ -613,7 +613,7 @@ public void testHandlePublishRequestWithSameTermButOlderOrSameVersion() { ); } - // scenario when handling a publish request from a fresh master + // scenario when handling a publish request from a fresh cluster-manager public void testHandlePublishRequestWithTermHigherThanLastAcceptedTerm() { VotingConfiguration initialConfig = VotingConfiguration.of(node1); StartJoinRequest startJoinRequest1 = new StartJoinRequest(node1, randomLongBetween(1, 5)); @@ -845,7 +845,7 @@ public void testVoteCollection() { assertFalse( voteCollection.addVote( - new DiscoveryNode("master-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT) + new DiscoveryNode("cluster-manager-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT) ) ); assertTrue(voteCollection.isEmpty()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index f43d6ff4e6c02..44239fdc0883f 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -107,7 +107,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase { /** - * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully + * This test was added to verify that state recovery is properly reset on a node after it has become cluster-manager and successfully * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows: * 3 cluster-manager-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. @@ -164,7 +164,7 @@ public void testCanUpdateClusterStateAfterStabilisation() { } } - public void testDoesNotElectNonMasterNode() { + public void testDoesNotElectNonClusterManagerNode() { try (Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY)) { cluster.runRandomly(); cluster.stabilise(); @@ -191,7 +191,7 @@ public void testUnhealthyNodesGetsRemoved() { cluster.clusterNodes.add(newNode1); cluster.clusterNodes.add(newNode2); cluster.stabilise( - // The first pinging discovers the master + // The first pinging discovers the cluster-manager defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) // One message delay to send a join + DEFAULT_DELAY_VARIABILITY @@ -627,7 +627,7 @@ public void testUnHealthyLeaderRemoved() { cluster.clusterNodes.add(newNode2); cluster.clusterNodes.add(newNode3); cluster.stabilise( - // The first pinging discovers the master + // The first pinging discovers the cluster-manager defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) // One message delay to send a join + DEFAULT_DELAY_VARIABILITY @@ -1096,7 +1096,7 @@ public void testIncompatibleDiffResendsFullState() { * does not notice the node disconnecting, it is important for the node not to be turned back into a follower but try * and join the leader again. 
*/ - public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { + public void testStayCandidateAfterReceivingFollowerCheckFromKnownClusterManager() { try (Cluster cluster = new Cluster(2, false, Settings.EMPTY)) { cluster.runRandomly(); cluster.stabilise(); @@ -1121,23 +1121,23 @@ public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { } } - public void testAppliesNoMasterBlockWritesByDefault() { - testAppliesNoMasterBlock(null, NO_MASTER_BLOCK_WRITES); + public void testAppliesNoClusterManagerBlockWritesByDefault() { + testAppliesNoClusterManagerBlock(null, NO_MASTER_BLOCK_WRITES); } - public void testAppliesNoMasterBlockWritesIfConfigured() { - testAppliesNoMasterBlock("write", NO_MASTER_BLOCK_WRITES); + public void testAppliesNoClusterManagerBlockWritesIfConfigured() { + testAppliesNoClusterManagerBlock("write", NO_MASTER_BLOCK_WRITES); } - public void testAppliesNoMasterBlockAllIfConfigured() { - testAppliesNoMasterBlock("all", NO_MASTER_BLOCK_ALL); + public void testAppliesNoClusterManagerBlockAllIfConfigured() { + testAppliesNoClusterManagerBlock("all", NO_MASTER_BLOCK_ALL); } - public void testAppliesNoMasterBlockMetadataWritesIfConfigured() { - testAppliesNoMasterBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES); + public void testAppliesNoClusterManagerBlockMetadataWritesIfConfigured() { + testAppliesNoClusterManagerBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES); } - private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock expectedBlock) { + private void testAppliesNoClusterManagerBlock(String noClusterManagerBlockSetting, ClusterBlock expectedBlock) { try (Cluster cluster = new Cluster(3)) { cluster.runRandomly(); cluster.stabilise(); @@ -1145,7 +1145,7 @@ private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock final ClusterNode leader = cluster.getAnyLeader(); leader.submitUpdateTask("update NO_CLUSTER_MANAGER_BLOCK_SETTING", cs -> { final Builder settingsBuilder = Settings.builder().put(cs.metadata().persistentSettings()); - settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noMasterBlockSetting); + settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noClusterManagerBlockSetting); return ClusterState.builder(cs) .metadata(Metadata.builder(cs.metadata()).persistentSettings(settingsBuilder.build())) .build(); @@ -1175,12 +1175,12 @@ private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock } } - public void testNodeCannotJoinIfJoinValidationFailsOnMaster() { + public void testNodeCannotJoinIfJoinValidationFailsOnClusterManager() { try (Cluster cluster = new Cluster(randomIntBetween(1, 3))) { cluster.runRandomly(); cluster.stabilise(); - // check that if node join validation fails on master, the nodes can't join + // check that if node join validation fails on cluster-manager, the nodes can't join List addedNodes = cluster.addNodes(randomIntBetween(1, 2)); final Set validatedNodes = new HashSet<>(); cluster.getAnyLeader().extraJoinValidators.add((discoveryNode, clusterState) -> { @@ -1305,7 +1305,7 @@ public void testDiscoveryUsesNodesFromLastClusterState() { } } - public void testFollowerRemovedIfUnableToSendRequestsToMaster() { + public void testFollowerRemovedIfUnableToSendRequestsToClusterManager() { try (Cluster cluster = new Cluster(3)) { cluster.runRandomly(); cluster.stabilise(); @@ -1333,7 +1333,7 @@ public void testFollowerRemovedIfUnableToSendRequestsToMaster() { cluster.clearBlackholedConnections(); cluster.stabilise( - // 
time for the disconnected node to find the master again + // time for the disconnected node to find the cluster-manager again defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 // time for joining + 4 * DEFAULT_DELAY_VARIABILITY @@ -1679,7 +1679,7 @@ public String toString() { } } - public void testReconfiguresToExcludeMasterIneligibleNodesInVotingConfig() { + public void testReconfiguresToExcludeClusterManagerIneligibleNodesInVotingConfig() { try (Cluster cluster = new Cluster(3)) { cluster.runRandomly(); cluster.stabilise(); @@ -1698,7 +1698,7 @@ public void testReconfiguresToExcludeMasterIneligibleNodesInVotingConfig() { final boolean chosenNodeIsLeader = chosenNode == cluster.getAnyLeader(); final long termBeforeRestart = cluster.getAnyNode().coordinator.getCurrentTerm(); - logger.info("--> restarting [{}] as a master-ineligible node", chosenNode); + logger.info("--> restarting [{}] as a cluster-manager-ineligible node", chosenNode); chosenNode.close(); cluster.clusterNodes.replaceAll( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index 7e7193fbf02ef..d5947bf444d17 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -685,7 +685,7 @@ public String executor() { } } - public void testPreferMasterNodes() { + public void testPreferClusterManagerNodes() { List nodes = randomNodes(10); DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); nodes.forEach(dn -> discoNodesBuilder.add(dn)); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 6bd2d1e70033a..fec1bb025d235 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -161,8 +161,9 @@ public void testSuccess() { } public void testUpdatesNodeWithNewRoles() throws Exception { - // Node roles vary by version, and new roles are suppressed for BWC. This means we can receive a join from a node that's already - // in the cluster but with a different set of roles: the node didn't change roles, but the cluster state came via an older master. + // Node roles vary by version, and new roles are suppressed for BWC. + // This means we can receive a join from a node that's already in the cluster but with a different set of roles: + // the node didn't change roles, but the cluster state came via an older cluster-manager. // In this case we must properly process its join to ensure that the roles are correct. 
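// Illustrative aside, not part of the original patch: in this test's own terms, the invariant
// being exercised is that executing the join swaps out the stale `bwcNode` entry (same node
// id as `actualNode`, roles trimmed by an older cluster-manager) for the joining node's full
// role set, e.g. something like:
// DiscoveryNode updated = result.resultingState.nodes().get(actualNode.getId());
// assert updated.getRoles().equals(actualNode.getRoles());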
final AllocationService allocationService = mock(AllocationService.class); @@ -171,7 +172,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception { final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); - final DiscoveryNode masterNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode actualNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode bwcNode = new DiscoveryNode( @@ -186,7 +187,13 @@ public void testUpdatesNodeWithNewRoles() throws Exception { actualNode.getVersion() ); final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(bwcNode)) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .masterNodeId(clusterManagerNode.getId()) + .add(bwcNode) + ) .build(); final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java index a637826951f87..a44026bbbf477 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java @@ -44,12 +44,12 @@ public class NoMasterBlockServiceTests extends OpenSearchTestCase { - private NoMasterBlockService noMasterBlockService; + private NoMasterBlockService noClusterManagerBlockService; private ClusterSettings clusterSettings; private void createService(Settings settings) { clusterSettings = new ClusterSettings(settings, BUILT_IN_CLUSTER_SETTINGS); - noMasterBlockService = new NoMasterBlockService(settings, clusterSettings); + noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings); } private void assertDeprecatedWarningEmitted() { @@ -61,22 +61,22 @@ private void assertDeprecatedWarningEmitted() { public void testBlocksWritesByDefault() { createService(Settings.EMPTY); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); } public void testBlocksWritesIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); } public void testBlocksAllIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); } public void testBlocksMetadataWritesIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), 
sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); } public void testRejectsInvalidSetting() { @@ -88,12 +88,12 @@ public void testRejectsInvalidSetting() { public void testSettingCanBeUpdated() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 43c9c77f193dd..2cf8c2c13d3b6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -98,7 +98,7 @@ public class NodeJoinTests extends OpenSearchTestCase { private static ThreadPool threadPool; - private MasterService masterService; + private MasterService clusterManagerService; private Coordinator coordinator; private DeterministicTaskQueue deterministicTaskQueue; private Transport transport; @@ -117,7 +117,7 @@ public static void afterClass() { @After public void tearDown() throws Exception { super.tearDown(); - masterService.close(); + clusterManagerService.close(); } private static ClusterState initialState(DiscoveryNode localNode, long term, long version, VotingConfiguration config) { @@ -138,61 +138,68 @@ private static ClusterState initialState(DiscoveryNode localNode, long term, lon .build(); } - private void setupFakeMasterServiceAndCoordinator(long term, ClusterState initialState, NodeHealthService nodeHealthService) { + private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterState initialState, NodeHealthService nodeHealthService) { deterministicTaskQueue = new DeterministicTaskQueue( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random() ); final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool(); - FakeThreadPoolMasterService fakeMasterService = new FakeThreadPoolMasterService( + FakeThreadPoolMasterService fakeClusterManagerService = new FakeThreadPoolMasterService( "test_node", "test", fakeThreadPool, deterministicTaskQueue::scheduleNow ); - setupMasterServiceAndCoordinator(term, initialState, fakeMasterService, fakeThreadPool, Randomness.get(), nodeHealthService); - fakeMasterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + setupClusterManagerServiceAndCoordinator( + term, + initialState, + fakeClusterManagerService, + fakeThreadPool, + Randomness.get(), + nodeHealthService + ); + fakeClusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { 
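// Short-circuit publication for the test: hand the freshly-built state straight back to the
// local coordinator as a publish request and acknowledge immediately, skipping the transport layer.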
coordinator.handlePublishRequest(new PublishRequest(event.state())); publishListener.onResponse(null); }); - fakeMasterService.start(); + fakeClusterManagerService.start(); } - private void setupRealMasterServiceAndCoordinator(long term, ClusterState initialState) { - MasterService masterService = new MasterService( + private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterState initialState) { + MasterService clusterManagerService = new MasterService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); AtomicReference clusterStateRef = new AtomicReference<>(initialState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - setupMasterServiceAndCoordinator( + setupClusterManagerServiceAndCoordinator( term, initialState, - masterService, + clusterManagerService, threadPool, new Random(Randomness.get().nextLong()), () -> new StatusInfo(HEALTHY, "healthy-info") ); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); } - private void setupMasterServiceAndCoordinator( + private void setupClusterManagerServiceAndCoordinator( long term, ClusterState initialState, - MasterService masterService, + MasterService clusterManagerService, ThreadPool threadPool, Random random, NodeHealthService nodeHealthService ) { - if (this.masterService != null || coordinator != null) { + if (this.clusterManagerService != null || coordinator != null) { throw new IllegalStateException("method setupMasterServiceAndCoordinator can only be called once"); } - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; CapturingTransport capturingTransport = new CapturingTransport() { @Override protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) { @@ -224,7 +231,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req transportService, writableRegistry(), OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY), - masterService, + clusterManagerService, () -> new InMemoryPersistedState(term, initialState), r -> emptyList(), new NoOpClusterApplier(), @@ -245,14 +252,14 @@ protected DiscoveryNode newNode(int i) { return newNode(i, randomBoolean()); } - protected DiscoveryNode newNode(int i, boolean master) { + protected DiscoveryNode newNode(int i, boolean clusterManager) { final Set roles; - if (master) { + if (clusterManager) { roles = singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else { roles = Collections.emptySet(); } - final String prefix = master ? "master_" : "data_"; + final String prefix = clusterManager ? 
"cluster_manager_" : "data_"; return new DiscoveryNode(prefix + i, i + "", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); } @@ -323,7 +330,7 @@ public void testJoinWithHigherTermElectsLeader() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(randomFrom(node0, node1))), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -347,7 +354,7 @@ public void testJoinWithHigherTermButBetterStateGetsRejected() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node1)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -362,12 +369,12 @@ public void testJoinWithHigherTermButBetterStateGetsRejected() { assertFalse(isLocalNodeElectedMaster()); } - public void testJoinWithHigherTermButBetterStateStillElectsMasterThroughSelfJoin() { + public void testJoinWithHigherTermButBetterStateStillElectsClusterManagerThroughSelfJoin() { DiscoveryNode node0 = newNode(0, true); DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -384,7 +391,7 @@ public void testJoinElectedLeader() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -404,7 +411,7 @@ public void testJoinElectedLeaderWithHigherTerm() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -426,7 +433,7 @@ public void testJoinAccumulation() { DiscoveryNode node2 = newNode(2, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node2)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -458,7 +465,7 @@ public void testJoinFollowerWithHigherTerm() throws Exception { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -481,7 +488,7 @@ public void testJoinUpdateVotingConfigExclusion() throws 
Exception { "knownNodeName" ); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, buildStateWithVotingConfigExclusion(initialNode, initialTerm, initialVersion, votingConfigExclusion), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -507,7 +514,7 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ); assertTrue( - MasterServiceTests.discoveryState(masterService) + MasterServiceTests.discoveryState(clusterManagerService) .getVotingConfigExclusions() .stream() .anyMatch( @@ -583,7 +590,7 @@ public void testJoinFollowerFails() throws Exception { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -604,7 +611,7 @@ public void testBecomeFollowerFailsPendingJoin() throws Exception { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node1)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -626,27 +633,31 @@ public void testBecomeFollowerFailsPendingJoin() throws Exception { } public void testConcurrentJoining() { - List masterNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5)) + List clusterManagerNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5)) .mapToObj(nodeId -> newNode(nodeId, true)) .collect(Collectors.toList()); - List otherNodes = IntStream.rangeClosed(masterNodes.size() + 1, masterNodes.size() + 1 + randomIntBetween(0, 5)) - .mapToObj(nodeId -> newNode(nodeId, false)) - .collect(Collectors.toList()); - List allNodes = Stream.concat(masterNodes.stream(), otherNodes.stream()).collect(Collectors.toList()); + List otherNodes = IntStream.rangeClosed( + clusterManagerNodes.size() + 1, + clusterManagerNodes.size() + 1 + randomIntBetween(0, 5) + ).mapToObj(nodeId -> newNode(nodeId, false)).collect(Collectors.toList()); + List allNodes = Stream.concat(clusterManagerNodes.stream(), otherNodes.stream()).collect(Collectors.toList()); - DiscoveryNode localNode = masterNodes.get(0); + DiscoveryNode localNode = clusterManagerNodes.get(0); VotingConfiguration votingConfiguration = new VotingConfiguration( - randomValueOtherThan(singletonList(localNode), () -> randomSubsetOf(randomIntBetween(1, masterNodes.size()), masterNodes)) - .stream() - .map(DiscoveryNode::getId) - .collect(Collectors.toSet()) + randomValueOtherThan( + singletonList(localNode), + () -> randomSubsetOf(randomIntBetween(1, clusterManagerNodes.size()), clusterManagerNodes) + ).stream().map(DiscoveryNode::getId).collect(Collectors.toSet()) ); logger.info("Voting configuration: {}", votingConfiguration); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupRealMasterServiceAndCoordinator(initialTerm, initialState(localNode, initialTerm, initialVersion, votingConfiguration)); + setupRealClusterManagerServiceAndCoordinator( + initialTerm, + initialState(localNode, initialTerm, initialVersion, votingConfiguration) + ); long newTerm = initialTerm + randomLongBetween(1, 10); // we need at least a quorum of voting nodes with a correct term and worse state @@ 
-735,10 +746,10 @@ public void testConcurrentJoining() { throw new RuntimeException(e); } - assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster()); + assertTrue(MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); for (DiscoveryNode successfulNode : successfulNodes) { assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); - assertFalse(successfulNode + " voted for master", coordinator.missingJoinVoteFrom(successfulNode)); + assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode)); } } @@ -749,7 +760,7 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { DiscoveryNode node1 = new DiscoveryNode("master1", "1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); long initialTerm = 1; long initialVersion = 1; - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -765,10 +776,10 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { } private boolean isLocalNodeElectedMaster() { - return MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster(); + return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } private boolean clusterStateHasNode(DiscoveryNode node) { - return node.equals(MasterServiceTests.discoveryState(masterService).nodes().get(node.getId())); + return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java index 954e8ce79cdc8..3e86ec11ae7b3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java @@ -447,7 +447,7 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter ); } - public void testPublishingToMastersFirst() { + public void testPublishingToClusterManagersFirst() { VotingConfiguration singleNodeConfig = VotingConfiguration.of(n1); initializeCluster(singleNodeConfig); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java index 71d640e202f33..057455fefc4b3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java @@ -223,14 +223,14 @@ private void check( boolean autoShrinkVotingConfiguration, VotingConfiguration expectedConfig ) { - final DiscoveryNode master = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get(); - check(liveNodes, retired, master.getId(), config, autoShrinkVotingConfiguration, expectedConfig); + final DiscoveryNode clusterManager = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get(); + check(liveNodes, retired, clusterManager.getId(), config, autoShrinkVotingConfiguration, expectedConfig); } private void check( Set liveNodes, Set retired, - String masterId, + String clusterManagerId, VotingConfiguration config, boolean 
autoShrinkVotingConfiguration, VotingConfiguration expectedConfig @@ -239,14 +239,14 @@ private void check( Settings.builder().put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration).build() ); - final DiscoveryNode master = liveNodes.stream().filter(n -> n.getId().equals(masterId)).findFirst().get(); - final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, master, config); + final DiscoveryNode clusterManager = liveNodes.stream().filter(n -> n.getId().equals(clusterManagerId)).findFirst().get(); + final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, clusterManager, config); assertEquals( new ParameterizedMessage( - "[liveNodes={}, retired={}, master={}, config={}, autoShrinkVotingConfiguration={}]", + "[liveNodes={}, retired={}, clusterManager={}, config={}, autoShrinkVotingConfiguration={}]", liveNodes, retired, - master, + clusterManager, config, autoShrinkVotingConfiguration ).getFormattedMessage(), diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java index 2f05297146f8e..06e58672fa994 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java @@ -85,10 +85,13 @@ public void testClusterHealth() { assertEquals(ClusterHealthStatus.GREEN, getClusterHealthStatus(clusterState)); } - private ClusterState addNode(ClusterState clusterState, String nodeName, boolean isMaster) { + private ClusterState addNode(ClusterState clusterState, String nodeName, boolean isClusterManager) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); nodeBuilder.add( - newNode(nodeName, Collections.singleton(isMaster ? DiscoveryNodeRole.CLUSTER_MANAGER_ROLE : DiscoveryNodeRole.DATA_ROLE)) + newNode( + nodeName, + Collections.singleton(isClusterManager ? 
DiscoveryNodeRole.CLUSTER_MANAGER_ROLE : DiscoveryNodeRole.DATA_ROLE) + ) ); return ClusterState.builder(clusterState).nodes(nodeBuilder).build(); } diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java index 9a6c458edeb11..bd856d5c41ace 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java @@ -157,11 +157,11 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte } }); - logger.info("--> submit task to restore master"); + logger.info("--> submit task to restore cluster-manager"); ClusterState currentState = clusterService.getClusterApplierService().state(); clusterService.getClusterApplierService() .onNewClusterState( - "restore master", + "restore cluster-manager", () -> ClusterState.builder(currentState) .nodes(DiscoveryNodes.builder(currentState.nodes()).masterNodeId(currentState.nodes().getLocalNodeId())) .build(), @@ -184,7 +184,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte assertFalse(listener.isDone()); - logger.info("--> realising task to restore master"); + logger.info("--> realising task to restore cluster-manager"); applyLatch.countDown(); listener.get(); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java index aafd507aef7cd..559dd86dce4b1 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java @@ -145,7 +145,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE try { List allNodes = new ArrayList<>(); - DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager allNodes.add(localNode); int numDataNodes = randomIntBetween(3, 5); List dataNodes = new ArrayList<>(numDataNodes); @@ -246,7 +246,7 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.V_1_2_1), DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE - ); // local node is the master + ); // local node is the cluster-manager allNodes.add(oldNode); ClusterState state = ClusterStateCreationUtils.state(oldNode, oldNode, allNodes.toArray(new DiscoveryNode[0])); diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java index bc36a57fed125..80c7d8c9417fe 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java @@ -108,14 +108,14 @@ public void testAll() { assertThat(discoveryNodes.resolveNodes(new String[0]), arrayContainingInAnyOrder(allNodes)); assertThat(discoveryNodes.resolveNodes("_all"), arrayContainingInAnyOrder(allNodes)); - final String[] nonMasterNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) 
.map(n -> n.value) .filter(n -> n.isMasterNode() == false) .map(DiscoveryNode::getId) .toArray(String[]::new); - assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonMasterNodes)); + assertThat(discoveryNodes.resolveNodes("_all", "cluster_manager:false"), arrayContainingInAnyOrder(nonClusterManagerNodes)); - assertThat(discoveryNodes.resolveNodes("master:false", "_all"), arrayContainingInAnyOrder(allNodes)); + assertThat(discoveryNodes.resolveNodes("cluster_manager:false", "_all"), arrayContainingInAnyOrder(allNodes)); } public void testCoordinatorOnlyNodes() { @@ -135,7 +135,7 @@ public void testCoordinatorOnlyNodes() { assertThat(discoveryNodes.resolveNodes("coordinating_only:true"), arrayContainingInAnyOrder(coordinatorOnlyNodes)); assertThat( - discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "master:false"), + discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "cluster_manager:false"), arrayContainingInAnyOrder(coordinatorOnlyNodes) ); assertThat(discoveryNodes.resolveNodes("_all", "coordinating_only:false"), arrayContainingInAnyOrder(nonCoordinatorOnlyNodes)); @@ -175,7 +175,7 @@ public void testResolveNodesIds() { assertThat(resolvedNodesIds, equalTo(expectedNodesIds)); } - public void testMastersFirst() { + public void testClusterManagersFirst() { final List inputNodes = randomNodes(10); final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); inputNodes.forEach(discoBuilder::add); @@ -254,19 +254,19 @@ public void testDeltas() { nodesB.add(node); } - DiscoveryNode masterA = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesA); - DiscoveryNode masterB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB); + DiscoveryNode clusterManagerA = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesA); + DiscoveryNode clusterManagerB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB); DiscoveryNodes.Builder builderA = DiscoveryNodes.builder(); nodesA.stream().forEach(builderA::add); - final String masterAId = masterA == null ? null : masterA.getId(); - builderA.masterNodeId(masterAId); + final String clusterManagerAId = clusterManagerA == null ? null : clusterManagerA.getId(); + builderA.masterNodeId(clusterManagerAId); builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId()); DiscoveryNodes.Builder builderB = DiscoveryNodes.builder(); nodesB.stream().forEach(builderB::add); - final String masterBId = masterB == null ? null : masterB.getId(); - builderB.masterNodeId(masterBId); + final String clusterManagerBId = clusterManagerB == null ? 
null : clusterManagerB.getId(); + builderB.masterNodeId(clusterManagerBId); builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId()); final DiscoveryNodes discoNodesA = builderA.build(); @@ -276,18 +276,18 @@ public void testDeltas() { DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); - if (masterA == null) { + if (clusterManagerA == null) { assertThat(delta.previousClusterManagerNode(), nullValue()); } else { - assertThat(delta.previousClusterManagerNode().getId(), equalTo(masterAId)); + assertThat(delta.previousClusterManagerNode().getId(), equalTo(clusterManagerAId)); } - if (masterB == null) { + if (clusterManagerB == null) { assertThat(delta.newMasterNode(), nullValue()); } else { - assertThat(delta.newMasterNode().getId(), equalTo(masterBId)); + assertThat(delta.newMasterNode().getId(), equalTo(clusterManagerBId)); } - if (Objects.equals(masterAId, masterBId)) { + if (Objects.equals(clusterManagerAId, clusterManagerBId)) { assertFalse(delta.masterNodeChanged()); } else { assertTrue(delta.masterNodeChanged()); @@ -306,6 +306,32 @@ public void testDeltas() { assertThat(delta.removedNodes().size(), equalTo(removedNodes.size())); } + // Validate using the deprecated 'master' role in the node filter can get correct result. + public void testDeprecatedMasterNodeFilter() { + final DiscoveryNodes discoveryNodes = buildDiscoveryNodes(); + + final String[] allNodes = StreamSupport.stream(discoveryNodes.spliterator(), false) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + + final String[] clusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + .map(n -> n.value) + .filter(n -> n.isMasterNode() == true) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + + final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + .map(n -> n.value) + .filter(n -> n.isMasterNode() == false) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + + assertThat(discoveryNodes.resolveNodes("cluster_manager:true"), arrayContainingInAnyOrder(clusterManagerNodes)); + assertThat(discoveryNodes.resolveNodes("master:true"), arrayContainingInAnyOrder(clusterManagerNodes)); + assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonClusterManagerNodes)); + assertThat(discoveryNodes.resolveNodes("master:false", "_all"), arrayContainingInAnyOrder(allNodes)); + } + private static AtomicInteger idGenerator = new AtomicInteger(); private static List randomNodes(final int numNodes) { diff --git a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java index db93aa39c2da7..2f6d34e1eb204 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java @@ -101,7 +101,7 @@ public void testReroutesWhenRequested() throws InterruptedException { public void testBatchesReroutesTogetherAtPriorityOfHighestSubmittedReroute() throws BrokenBarrierException, InterruptedException { final CyclicBarrier cyclicBarrier = new CyclicBarrier(2); - clusterService.submitStateUpdateTask("block master service", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("block cluster-manager service", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { 
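// Rendezvous with the test thread: arriving here signals that the cluster-manager service
// thread is now blocked, and the task stays parked until the test trips the barrier again
// (see the matching awaits in the test body below).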
cyclicBarrier.await(); // notify test that we are blocked @@ -115,7 +115,7 @@ public void onFailure(String source, Exception e) { } }); - cyclicBarrier.await(); // wait for master thread to be blocked + cyclicBarrier.await(); // wait for cluster-manager thread to be blocked final AtomicBoolean rerouteExecuted = new AtomicBoolean(); final BatchedRerouteService batchedRerouteService = new BatchedRerouteService(clusterService, (s, r) -> { @@ -194,7 +194,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS actions.forEach(threadPool.generic()::execute); assertTrue(tasksSubmittedCountDown.await(10, TimeUnit.SECONDS)); - cyclicBarrier.await(); // allow master thread to continue; + cyclicBarrier.await(); // allow cluster-manager thread to continue; assertTrue(tasksCompletedCountDown.await(10, TimeUnit.SECONDS)); // wait for reroute to complete assertTrue(rerouteExecuted.get()); // see above for assertion that it's only called once } diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index d42c3e80c60c9..8bf2b1626292a 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -774,14 +774,14 @@ private DiscoveryNode[] setupNodes() { ); allNodes[i++] = node; } - DiscoveryNode master = new DiscoveryNode( - "master", + DiscoveryNode clusterManager = new DiscoveryNode( + "cluster-manager", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), Version.CURRENT ); - allNodes[i] = master; + allNodes[i] = clusterManager; return allNodes; } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java index f60497b4108b7..b3d62ea9c6160 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -230,7 +230,7 @@ private static Version getNodeVersion(ShardRouting shardRouting, ClusterState st public ClusterState randomInitialClusterState() { List allNodes = new ArrayList<>(); - DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager allNodes.add(localNode); // at least two nodes that have the data role so that we can allocate shards allNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE)); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java index 243701e746ef5..3a1f4a586d519 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java @@ -170,7 +170,7 @@ public void testInSyncAllocationIdsUpdated() { /** * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica. - * The primary instructs master to fail replica before acknowledging write to client. 
In the meanwhile, the node of the replica was + * The primary instructs cluster-manager to fail replica before acknowledging write to client. In the meanwhile, the node of the replica was * removed from the cluster (disassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation * id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the allocation id * from the in-sync set. @@ -204,8 +204,8 @@ public void testDeadNodesBeforeReplicaFailed() throws Exception { /** * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica. - * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, primary fails for an unrelated - * reason. Master now batches both requests to fail primary and replica. We have to make sure that only the allocation id of the primary + * The primary instructs cluster-manager to fail replica before acknowledging write to client. In the meanwhile, primary fails for an unrelated + * reason. Cluster-manager now batches both requests to fail primary and replica. We have to make sure that only the allocation id of the primary * is kept in the in-sync allocation set before we acknowledge request to client. Otherwise we would acknowledge a write that made it * into the primary but not the replica but the replica is still considered non-stale. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index cbf624cdad2ca..c3f54fa7580ac 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -1072,7 +1072,7 @@ public void testForSingleDataNode() { RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); - logger.info("--> adding one master node, one data node"); + logger.info("--> adding one cluster-manager node, one data node"); DiscoveryNode discoveryNode1 = new DiscoveryNode( "", "node1", @@ -1222,9 +1222,9 @@ public void testWatermarksEnabledForSingleDataNode() { .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); - DiscoveryNode masterNode = new DiscoveryNode( - "master", - "master", + DiscoveryNode clusterManagerNode = new DiscoveryNode( + "cluster-manager", + "cluster-manager", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), @@ -1240,7 +1240,7 @@ public void testWatermarksEnabledForSingleDataNode() { ); DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(dataNode); if (randomBoolean()) { - discoveryNodesBuilder.add(masterNode); + discoveryNodesBuilder.add(clusterManagerNode); } DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java index 5b05cb3afd83e..1a047b3ccd9da 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java @@ -199,9 +199,9 @@ private ClusterState createInitialClusterState() { RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() - .add(newNode("master", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) - .localNodeId("master") - .masterNodeId("master") + .add(newNode("cluster-manager", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) + .localNodeId("cluster-manager") + .masterNodeId("cluster-manager") .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index 04b4044864dbd..b9b939f28e365 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -104,7 +104,7 @@ public void tearDown() throws Exception { super.tearDown(); } - private TimedClusterApplierService createTimedClusterService(boolean makeMaster) { + private TimedClusterApplierService createTimedClusterService(boolean makeClusterManager) { DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); TimedClusterApplierService timedClusterApplierService = new TimedClusterApplierService( Settings.builder().put("cluster.name", "ClusterApplierServiceTests").build(), @@ -118,7 +118,7 @@ private TimedClusterApplierService createTimedClusterService(boolean makeMaster) DiscoveryNodes.builder() .add(localNode) .localNodeId(localNode.getId()) - .masterNodeId(makeMaster ? localNode.getId() : null) + .masterNodeId(makeClusterManager ? 
localNode.getId() : null) ) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build() @@ -292,19 +292,19 @@ public void onFailure(String source, Exception e) { } } - public void testLocalNodeMasterListenerCallbacks() { + public void testLocalNodeClusterManagerListenerCallbacks() { TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false); - AtomicBoolean isMaster = new AtomicBoolean(); + AtomicBoolean isClusterManager = new AtomicBoolean(); timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() { @Override public void onClusterManager() { - isMaster.set(true); + isClusterManager.set(true); } @Override public void offClusterManager() { - isMaster.set(false); + isClusterManager.set(false); } }); @@ -313,7 +313,7 @@ public void offClusterManager() { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build(); setState(timedClusterApplierService, state); - assertThat(isMaster.get(), is(true)); + assertThat(isClusterManager.get(), is(true)); nodes = state.nodes(); nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null); @@ -322,11 +322,11 @@ public void offClusterManager() { .nodes(nodesBuilder) .build(); setState(timedClusterApplierService, state); - assertThat(isMaster.get(), is(false)); + assertThat(isClusterManager.get(), is(false)); nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build(); setState(timedClusterApplierService, state); - assertThat(isMaster.get(), is(true)); + assertThat(isClusterManager.get(), is(true)); timedClusterApplierService.close(); } diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 845a5ee91052d..d5f7344c544b9 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -121,9 +121,9 @@ public void randomizeCurrentTime() { relativeTimeInMillis = randomLongBetween(0L, 1L << 62); } - private MasterService createMasterService(boolean makeMaster) { + private MasterService createClusterManagerService(boolean makeClusterManager) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - final MasterService masterService = new MasterService( + final MasterService clusterManagerService = new MasterService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -133,26 +133,29 @@ private MasterService createMasterService(boolean makeMaster) { ); final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes( - DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(makeMaster ? localNode.getId() : null) + DiscoveryNodes.builder() + .add(localNode) + .localNodeId(localNode.getId()) + .masterNodeId(makeClusterManager ? 
localNode.getId() : null) ) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); - return masterService; + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); + return clusterManagerService; } - public void testMasterAwareExecution() throws Exception { - final MasterService nonMaster = createMasterService(false); + public void testClusterManagerAwareExecution() throws Exception { + final MasterService nonClusterManager = createClusterManagerService(false); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); - nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + nonClusterManager.submitStateUpdateTask("test", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { latch1.countDown(); @@ -167,10 +170,10 @@ public void onFailure(String source, Exception e) { }); latch1.await(); - assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); + assertTrue("cluster state update task was executed on a non-cluster-manager", taskFailed[0]); final CountDownLatch latch2 = new CountDownLatch(1); - nonMaster.submitStateUpdateTask("test", new LocalClusterUpdateTask() { + nonClusterManager.submitStateUpdateTask("test", new LocalClusterUpdateTask() { @Override public ClusterTasksResult execute(ClusterState currentState) { taskFailed[0] = false; @@ -185,13 +188,13 @@ public void onFailure(String source, Exception e) { } }); latch2.await(); - assertFalse("non-master cluster state update task was not executed", taskFailed[0]); + assertFalse("non-cluster-manager cluster state update task was not executed", taskFailed[0]); - nonMaster.close(); + nonClusterManager.close(); } public void testThreadContext() throws InterruptedException { - final MasterService master = createMasterService(true); + final MasterService clusterManager = createClusterManagerService(true); final CountDownLatch latch = new CountDownLatch(1); try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { @@ -203,9 +206,9 @@ public void testThreadContext() throws InterruptedException { threadPool.getThreadContext().putHeader(expectedHeaders); final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); - final TimeValue masterTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); + final TimeValue clusterManagerTimeout = randomBoolean() ? 
TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); - master.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + clusterManager.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { assertTrue(threadPool.getThreadContext().isSystemContext()); @@ -249,7 +252,7 @@ public TimeValue ackTimeout() { @Override public TimeValue timeout() { - return masterTimeout; + return clusterManagerTimeout; } @Override @@ -277,7 +280,7 @@ public void onAckTimeout() { latch.await(); - master.close(); + clusterManager.close(); } /* @@ -289,8 +292,8 @@ public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws Interru final CountDownLatch latch = new CountDownLatch(1); AtomicBoolean published = new AtomicBoolean(); - try (MasterService masterService = createMasterService(true)) { - masterService.submitStateUpdateTask( + try (MasterService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask( "testClusterStateTaskListenerThrowingExceptionIsOkay", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -418,8 +421,8 @@ public void testClusterStateUpdateLogging() throws Exception { ) ); - try (MasterService masterService = createMasterService(true)) { - masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + try (MasterService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis(); @@ -434,7 +437,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis(); @@ -449,7 +452,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) {} }); - masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis(); @@ -466,7 +469,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -614,7 +617,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }; - try (MasterService masterService = createMasterService(true)) { + try (MasterService clusterManagerService = createClusterManagerService(true)) { final ConcurrentMap submittedTasksPerThread = new ConcurrentHashMap<>(); CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) { @@ -629,7 +632,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size()); final 
TaskExecutor executor = assignment.v1(); if (tasks.size() == 1) { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( threadName, tasks.stream().findFirst().get(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -639,7 +642,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } else { Map taskListeners = new HashMap<>(); tasks.forEach(t -> taskListeners.put(t, listener)); - masterService.submitStateUpdateTasks( + clusterManagerService.submitStateUpdateTasks( threadName, taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -693,8 +696,8 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted final CountDownLatch latch = new CountDownLatch(1); final AtomicReference assertionRef = new AtomicReference<>(); - try (MasterService masterService = createMasterService(true)) { - masterService.submitStateUpdateTask( + try (MasterService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask( "testBlockingCallInClusterStateTaskListenerFails", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -785,7 +788,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { ); try ( - MasterService masterService = new MasterService( + MasterService clusterManagerService = new MasterService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -807,7 +810,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { if (event.source().contains("test5")) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY @@ -822,12 +825,12 @@ public void testLongClusterStateUpdateLogging() throws Exception { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); final CountDownLatch latch = new CountDownLatch(6); final CountDownLatch processedFirstTask = new CountDownLatch(1); - masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += randomLongBetween( @@ -850,7 +853,7 @@ public void onFailure(String source, Exception e) { }); processedFirstTask.await(); - masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( @@ -869,7 +872,7 @@ public void onFailure(String source, Exception e) { latch.countDown(); } }); - masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test3", new 
ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( @@ -888,7 +891,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( @@ -907,7 +910,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -923,7 +926,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -941,7 +944,7 @@ public void onFailure(String source, Exception e) { }); // Additional update task to make sure all previous logging made it to the loggerName // We don't check logging for this on since there is no guarantee that it will occur before our check - masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -968,7 +971,7 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); try ( - MasterService masterService = new MasterService( + MasterService clusterManagerService = new MasterService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -983,9 +986,9 @@ public void testAcking() throws InterruptedException { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference publisherRef = new AtomicReference<>(); - masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); - masterService.setClusterStateSupplier(() -> initialClusterState); - masterService.start(); + clusterManagerService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); + clusterManagerService.setClusterStateSupplier(() -> initialClusterState); + clusterManagerService.start(); // check that we don't time out before even committing the cluster state { @@ -997,7 +1000,7 @@ public void testAcking() throws InterruptedException { ) ); - masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return 
ClusterState.builder(currentState).build(); @@ -1052,7 +1055,7 @@ public void onAckTimeout() { ackListener.onNodeAck(node3, null); }); - masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1096,10 +1099,10 @@ public void onAckTimeout() { } /** - * Returns the cluster state that the master service uses (and that is provided by the discovery layer) + * Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer) */ - public static ClusterState discoveryState(MasterService masterService) { - return masterService.state(); + public static ClusterState discoveryState(MasterService clusterManagerService) { + return clusterManagerService.state(); } } diff --git a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java index e690770b3d0a5..307edc2f03075 100644 --- a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java @@ -155,15 +155,15 @@ ClusterState getNodeClusterState(String node) { return client(node).admin().cluster().prepareState().setLocal(true).get().getState(); } - void assertNoMaster(final String node) throws Exception { - assertNoMaster(node, null, TimeValue.timeValueSeconds(30)); + void assertNoClusterManager(final String node) throws Exception { + assertNoClusterManager(node, null, TimeValue.timeValueSeconds(30)); } - void assertNoMaster(final String node, TimeValue maxWaitTime) throws Exception { - assertNoMaster(node, null, maxWaitTime); + void assertNoClusterManager(final String node, TimeValue maxWaitTime) throws Exception { + assertNoClusterManager(node, null, maxWaitTime); } - void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception { + void assertNoClusterManager(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception { assertBusy(() -> { ClusterState state = getNodeClusterState(node); final DiscoveryNodes nodes = state.nodes(); @@ -179,26 +179,34 @@ void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBloc }, maxWaitTime.getMillis(), TimeUnit.MILLISECONDS); } - void assertDifferentMaster(final String node, final String oldMasterNode) throws Exception { + void assertDifferentClusterManager(final String node, final String oldClusterManagerNode) throws Exception { assertBusy(() -> { ClusterState state = getNodeClusterState(node); - String masterNode = null; + String clusterManagerNode = null; if (state.nodes().getMasterNode() != null) { - masterNode = state.nodes().getMasterNode().getName(); + clusterManagerNode = state.nodes().getMasterNode().getName(); } - logger.trace("[{}] master is [{}]", node, state.nodes().getMasterNode()); - assertThat("node [" + node + "] still has [" + masterNode + "] as master", oldMasterNode, not(equalTo(masterNode))); + logger.trace("[{}] cluster-manager is [{}]", node, state.nodes().getMasterNode()); + assertThat( + "node [" + node + "] still has [" + clusterManagerNode + "] as cluster-manager", + oldClusterManagerNode, + not(equalTo(clusterManagerNode)) + ); }, 30, TimeUnit.SECONDS); } - void 
assertMaster(String masterNode, List<String> nodes) throws Exception { + void assertClusterManager(String clusterManagerNode, List<String> nodes) throws Exception { assertBusy(() -> { for (String node : nodes) { ClusterState state = getNodeClusterState(node); String failMsgSuffix = "cluster_state:\n" + state; assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size())); - String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; - assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode)); + String otherClusterManagerNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; + assertThat( + "wrong cluster-manager on node [" + node + "]. " + failMsgSuffix, + otherClusterManagerNodeName, + equalTo(clusterManagerNode) + ); } }); } diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index d1e3f406b4933..efcefab6c9f8b 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -70,7 +70,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private TransportService transportService; private NamedWriteableRegistry namedWriteableRegistry; - private MasterService masterService; + private MasterService clusterManagerService; private ClusterApplier clusterApplier; private ThreadPool threadPool; private ClusterSettings clusterSettings; @@ -93,7 +93,7 @@ public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - masterService = mock(MasterService.class); + clusterManagerService = mock(MasterService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -112,7 +112,7 @@ private DiscoveryModule newModule(Settings settings, List<Plugin> plugi transportService, namedWriteableRegistry, null, - masterService, + clusterManagerService, clusterApplier, clusterSettings, plugins, diff --git a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java index 403d2e2122855..3a1c24806e266 100644 --- a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -137,7 +137,7 @@ public void stopServices() { terminate(threadPool); } - public void testConnectsToMasterNode() throws InterruptedException { + public void testConnectsToClusterManagerNode() throws InterruptedException { final CountDownLatch completionLatch = new CountDownLatch(1); final SetOnce<DiscoveryNode> receivedNode = new SetOnce<>(); @@ -190,7 +190,7 @@ public void testLogsFullConnectionFailureAfterSuccessfulHandshake() throws Excep } } - public void testDoesNotConnectToNonMasterNode() throws InterruptedException { + public void testDoesNotConnectToNonClusterManagerNode() throws InterruptedException { remoteNode = new
DiscoveryNode("remote-node", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); discoveryAddress = getDiscoveryAddress(); remoteClusterName = "local-cluster"; diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index d6cafb3421f7d..2f78e60631ec2 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -147,7 +147,7 @@ public void run() { listener.onResponse(discoveryNode); return; } else { - listener.onFailure(new OpenSearchException("non-master node " + discoveryNode)); + listener.onFailure(new OpenSearchException("non-cluster-manager node " + discoveryNode)); return; } } @@ -165,20 +165,20 @@ public String toString() { } class TestPeerFinder extends PeerFinder { - DiscoveryNode discoveredMasterNode; - OptionalLong discoveredMasterTerm = OptionalLong.empty(); + DiscoveryNode discoveredClusterManagerNode; + OptionalLong discoveredClusterManagerTerm = OptionalLong.empty(); TestPeerFinder(Settings settings, TransportService transportService, TransportAddressConnector transportAddressConnector) { super(settings, transportService, transportAddressConnector, PeerFinderTests.this::resolveConfiguredHosts); } @Override - protected void onActiveClusterManagerFound(DiscoveryNode masterNode, long term) { + protected void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term) { assert holdsLock() == false : "PeerFinder lock held in error"; - assertThat(discoveredMasterNode, nullValue()); - assertFalse(discoveredMasterTerm.isPresent()); - discoveredMasterNode = masterNode; - discoveredMasterTerm = OptionalLong.of(term); + assertThat(discoveredClusterManagerNode, nullValue()); + assertFalse(discoveredClusterManagerTerm.isPresent()); + discoveredClusterManagerNode = clusterManagerNode; + discoveredClusterManagerTerm = OptionalLong.of(term); } @Override @@ -335,8 +335,8 @@ public void testDoesNotAddUnreachableNodesFromUnicastHostsList() { assertFoundPeers(); } - public void testDoesNotAddNonMasterEligibleNodesFromUnicastHostsList() { - final DiscoveryNode nonMasterNode = new DiscoveryNode( + public void testDoesNotAddNonClusterManagerEligibleNodesFromUnicastHostsList() { + final DiscoveryNode nonClusterManagerNode = new DiscoveryNode( "node-from-hosts-list", buildNewFakeTransportAddress(), emptyMap(), @@ -344,8 +344,8 @@ public void testDoesNotAddNonMasterEligibleNodesFromUnicastHostsList() { Version.CURRENT ); - providedAddresses.add(nonMasterNode.getAddress()); - transportAddressConnector.addReachableNode(nonMasterNode); + providedAddresses.add(nonClusterManagerNode.getAddress()); + transportAddressConnector.addReachableNode(nonClusterManagerNode); peerFinder.activate(lastAcceptedNodes); runAllRunnableTasks(); @@ -423,7 +423,7 @@ public void testAddsReachableNodesFromIncomingRequests() { assertFoundPeers(sourceNode, otherKnownNode); } - public void testDoesNotAddReachableNonMasterEligibleNodesFromIncomingRequests() { + public void testDoesNotAddReachableNonClusterManagerEligibleNodesFromIncomingRequests() { final DiscoveryNode sourceNode = new DiscoveryNode( "request-source", buildNewFakeTransportAddress(), @@ -494,7 +494,7 @@ public void testRespondsToRequestWhenActive() { } public void testDelegatesRequestHandlingWhenInactive() { - final DiscoveryNode masterNode = newDiscoveryNode("master-node"); + final DiscoveryNode clusterManagerNode = 
newDiscoveryNode("cluster-manager-node"); final DiscoveryNode sourceNode = newDiscoveryNode("request-source"); transportAddressConnector.addReachableNode(sourceNode); @@ -502,9 +502,9 @@ public void testDelegatesRequestHandlingWhenInactive() { final long term = randomNonNegativeLong(); peerFinder.setCurrentTerm(term); - peerFinder.deactivate(masterNode); + peerFinder.deactivate(clusterManagerNode); - final PeersResponse expectedResponse = new PeersResponse(Optional.of(masterNode), Collections.emptyList(), term); + final PeersResponse expectedResponse = new PeersResponse(Optional.of(clusterManagerNode), Collections.emptyList(), term); final PeersResponse peersResponse = peerFinder.handlePeersRequest(new PeersRequest(sourceNode, Collections.emptyList())); assertThat(peersResponse, equalTo(expectedResponse)); } @@ -590,7 +590,7 @@ public void testAddsReachablePeersFromResponse() { assertFoundPeers(otherNode, discoveredNode); } - public void testAddsReachableMasterFromResponse() { + public void testAddsReachableClusterManagerFromResponse() { final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list"); providedAddresses.add(otherNode.getAddress()); transportAddressConnector.addReachableNode(otherNode); @@ -599,21 +599,21 @@ public void testAddsReachableMasterFromResponse() { runAllRunnableTasks(); assertFoundPeers(otherNode); - final DiscoveryNode discoveredMaster = newDiscoveryNode("discovered-master"); + final DiscoveryNode discoveredClusterManager = newDiscoveryNode("discovered-cluster-manager"); respondToRequests(node -> { assertThat(node, is(otherNode)); - return new PeersResponse(Optional.of(discoveredMaster), emptyList(), randomNonNegativeLong()); + return new PeersResponse(Optional.of(discoveredClusterManager), emptyList(), randomNonNegativeLong()); }); - transportAddressConnector.addReachableNode(discoveredMaster); + transportAddressConnector.addReachableNode(discoveredClusterManager); runAllRunnableTasks(); - assertFoundPeers(otherNode, discoveredMaster); - assertThat(peerFinder.discoveredMasterNode, nullValue()); - assertFalse(peerFinder.discoveredMasterTerm.isPresent()); + assertFoundPeers(otherNode, discoveredClusterManager); + assertThat(peerFinder.discoveredClusterManagerNode, nullValue()); + assertFalse(peerFinder.discoveredClusterManagerTerm.isPresent()); } - public void testHandlesDiscoveryOfMasterFromResponseFromMaster() { + public void testHandlesDiscoveryOfClusterManagerFromResponseFromClusterManager() { final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list"); providedAddresses.add(otherNode.getAddress()); transportAddressConnector.addReachableNode(otherNode); @@ -631,8 +631,8 @@ public void testHandlesDiscoveryOfMasterFromResponseFromMaster() { runAllRunnableTasks(); assertFoundPeers(otherNode); - assertThat(peerFinder.discoveredMasterNode, is(otherNode)); - assertThat(peerFinder.discoveredMasterTerm, is(OptionalLong.of(term))); + assertThat(peerFinder.discoveredClusterManagerNode, is(otherNode)); + assertThat(peerFinder.discoveredClusterManagerTerm, is(OptionalLong.of(term))); } public void testOnlyRequestsPeersOncePerRoundButDoesRetryNextRound() { diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index 6f07d0de1e31d..f9e1b8e30af41 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -524,8 +524,8 @@ public void 
testEnsureNoShardDataOrIndexMetadata() throws IOException { Settings settings = buildEnvSettings(Settings.EMPTY); Index index = new Index("test", "testUUID"); - // build settings using same path.data as original but without data and master roles - Settings noDataNoMasterSettings = Settings.builder() + // build settings using same path.data as original but without data and cluster-manager roles + Settings noDataNoClusterManagerSettings = Settings.builder() .put(settings) .put( NodeRoles.removeRoles( @@ -535,8 +535,8 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { ) .build(); - // test that we can create data=false and master=false with no meta information - newNodeEnvironment(noDataNoMasterSettings).close(); + // test that we can create data=false and cluster_manager=false with no meta information + newNodeEnvironment(noDataNoClusterManagerSettings).close(); Path indexPath; try (NodeEnvironment env = newNodeEnvironment(settings)) { @@ -546,7 +546,7 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { indexPath = env.indexPaths(index)[0]; } - verifyFailsOnMetadata(noDataNoMasterSettings, indexPath); + verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath); // build settings using same path.data as original but without data role Settings noDataSettings = nonDataNode(settings); @@ -563,15 +563,15 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName); // assert that we get the stricter message on meta-data when both conditions fail - verifyFailsOnMetadata(noDataNoMasterSettings, indexPath); + verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath); - // build settings using same path.data as original but without master role - Settings noMasterSettings = nonMasterNode(settings); + // build settings using same path.data as original but without cluster-manager role + Settings noClusterManagerSettings = nonMasterNode(settings); - // test that we can create master=false env regardless of data. - newNodeEnvironment(noMasterSettings).close(); + // test that we can create cluster_manager=false env regardless of data. + newNodeEnvironment(noClusterManagerSettings).close(); - // test that we can create data=true, master=true env. Also remove state dir to leave only shard data for following asserts + // test that we can create data=true, cluster_manager=true env. Also remove state dir to leave only shard data for following asserts try (NodeEnvironment env = newNodeEnvironment(settings)) { for (Path path : env.indexPaths(index)) { Files.delete(path.resolve(MetadataStateFormat.STATE_DIR_NAME)); @@ -580,7 +580,7 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { // assert that we fail on shard data even without the metadata dir. 
verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName); - verifyFailsOnShardData(noDataNoMasterSettings, indexPath, shardDataDirName); + verifyFailsOnShardData(noDataNoClusterManagerSettings, indexPath, shardDataDirName); } private void verifyFailsOnShardData(Settings settings, Path indexPath, String shardDataDirName) { @@ -597,7 +597,7 @@ private void verifyFailsOnShardData(Settings settings, Path indexPath, String sh private void verifyFailsOnMetadata(Settings settings, Path indexPath) { IllegalStateException ex = expectThrows( IllegalStateException.class, - "Must fail creating NodeEnvironment on a data path that has index metadata if node does not have data and master roles", + "Must fail creating NodeEnvironment on a data path that has index metadata if node does not have data and cluster-manager roles", () -> newNodeEnvironment(settings).close() ); diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index 7a346d4cf9fc5..ffcbb3eed91f7 100644 --- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -75,18 +75,18 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase { private static final Index INDEX = new Index("testIndex", "testUUID"); - private Settings dataMasterSettings; + private Settings dataClusterManagerSettings; private Environment environment; private Path[] nodePaths; - private Settings dataNoMasterSettings; - private Settings noDataNoMasterSettings; - private Settings noDataMasterSettings; + private Settings dataNoClusterManagerSettings; + private Settings noDataNoClusterManagerSettings; + private Settings noDataClusterManagerSettings; @Before public void createNodePaths() throws IOException { - dataMasterSettings = buildEnvSettings(Settings.EMPTY); - environment = TestEnvironment.newEnvironment(dataMasterSettings); - try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataMasterSettings, environment)) { + dataClusterManagerSettings = buildEnvSettings(Settings.EMPTY); + environment = TestEnvironment.newEnvironment(dataClusterManagerSettings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataClusterManagerSettings, environment)) { nodePaths = nodeEnvironment.nodeDataPaths(); final String nodeId = randomAlphaOfLength(10); try ( @@ -95,36 +95,36 @@ public void createNodePaths() throws IOException { nodeId, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, - new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new ClusterSettings(dataClusterManagerSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L ).createWriter() ) { writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE); } } - dataNoMasterSettings = nonMasterNode(dataMasterSettings); - noDataNoMasterSettings = removeRoles( - dataMasterSettings, + dataNoClusterManagerSettings = nonMasterNode(dataClusterManagerSettings); + noDataNoClusterManagerSettings = removeRoles( + dataClusterManagerSettings, Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) ); - noDataMasterSettings = masterNode(nonDataNode(dataMasterSettings)); + noDataClusterManagerSettings = masterNode(nonDataNode(dataClusterManagerSettings)); } public void testEarlyExitNoCleanup() throws Exception { - createIndexDataFiles(dataMasterSettings, randomInt(10), randomBoolean()); + 
createIndexDataFiles(dataClusterManagerSettings, randomInt(10), randomBoolean()); - verifyNoQuestions(dataMasterSettings, containsString(NO_CLEANUP)); - verifyNoQuestions(dataNoMasterSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataNoClusterManagerSettings, containsString(NO_CLEANUP)); } public void testNothingToCleanup() throws Exception { - verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); - verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - Environment environment = TestEnvironment.newEnvironment(noDataMasterSettings); + Environment environment = TestEnvironment.newEnvironment(noDataClusterManagerSettings); if (randomBoolean()) { - try (NodeEnvironment env = new NodeEnvironment(noDataMasterSettings, environment)) { + try (NodeEnvironment env = new NodeEnvironment(noDataClusterManagerSettings, environment)) { try ( PersistedClusterStateService.Writer writer = OpenSearchNodeCommand.createPersistedClusterStateService( Settings.EMPTY, @@ -136,19 +136,24 @@ public void testNothingToCleanup() throws Exception { } } - verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); - verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - createIndexDataFiles(dataMasterSettings, 0, randomBoolean()); + createIndexDataFiles(dataClusterManagerSettings, 0, randomBoolean()); - verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); } public void testLocked() throws IOException { - try (NodeEnvironment env = new NodeEnvironment(dataMasterSettings, TestEnvironment.newEnvironment(dataMasterSettings))) { + try ( + NodeEnvironment env = new NodeEnvironment( + dataClusterManagerSettings, + TestEnvironment.newEnvironment(dataClusterManagerSettings) + ) + ) { assertThat( - expectThrows(OpenSearchException.class, () -> verifyNoQuestions(noDataNoMasterSettings, null)).getMessage(), + expectThrows(OpenSearchException.class, () -> verifyNoQuestions(noDataNoClusterManagerSettings, null)).getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG) ); } @@ -158,7 +163,7 @@ public void testCleanupAll() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); + createIndexDataFiles(dataClusterManagerSettings, shardCount, hasClusterState); String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, environment.dataFiles().length * shardCount, 0); @@ -168,22 +173,22 @@ public void testCleanupAll() throws Exception { conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); - verifyUnchangedOnAbort(noDataNoMasterSettings, outputMatcher, verbose); + verifyUnchangedOnAbort(noDataNoClusterManagerSettings, outputMatcher, verbose); // verify test setup - 
expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoMasterSettings, environment).close()); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoClusterManagerSettings, environment).close()); - verifySuccess(noDataNoMasterSettings, outputMatcher, verbose); + verifySuccess(noDataNoClusterManagerSettings, outputMatcher, verbose); // verify cleaned. - new NodeEnvironment(noDataNoMasterSettings, environment).close(); + new NodeEnvironment(noDataNoClusterManagerSettings, environment).close(); } public void testCleanupShardData() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); + createIndexDataFiles(dataClusterManagerSettings, shardCount, hasClusterState); Matcher matcher = allOf( containsString(NodeRepurposeCommand.shardMessage(environment.dataFiles().length * shardCount, 1)), @@ -192,15 +197,15 @@ public void testCleanupShardData() throws Exception { conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); - verifyUnchangedOnAbort(noDataMasterSettings, matcher, verbose); + verifyUnchangedOnAbort(noDataClusterManagerSettings, matcher, verbose); // verify test setup - expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataMasterSettings, environment).close()); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataClusterManagerSettings, environment).close()); - verifySuccess(noDataMasterSettings, matcher, verbose); + verifySuccess(noDataClusterManagerSettings, matcher, verbose); // verify clean. - new NodeEnvironment(noDataMasterSettings, environment).close(); + new NodeEnvironment(noDataClusterManagerSettings, environment).close(); } static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { diff --git a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java index 1af5a63e344d0..982c21a9e57ec 100644 --- a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java +++ b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java @@ -433,7 +433,7 @@ public void run() { try { entry = simulations.get(nodeId); if (entry == null) { - // we are simulating a master node switch, wait for it to not be null + // we are simulating a cluster-manager node switch, wait for it to not be null assertBusy(() -> assertTrue(simulations.containsKey(nodeId))); } assert entry != null; diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index 63792968b1c59..51ba096a86ae0 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -129,13 +129,13 @@ public void testRecoverStateUpdateTask() throws Exception { GatewayService service = createService(Settings.builder()); ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(); String nodeId = randomAlphaOfLength(10); - DiscoveryNode masterNode = DiscoveryNode.createLocal( + DiscoveryNode clusterManagerNode = DiscoveryNode.createLocal( settings(Version.CURRENT).put(masterNode()).build(), new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId ); ClusterState stateWithBlock = 
ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(clusterManagerNode).build()) .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) .build(); diff --git a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java index 1907abbfcaabd..00ca35207620d 100644 --- a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java +++ b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java @@ -86,7 +86,7 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase { - private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata, boolean masterEligible) { + private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) { Metadata metadata = Metadata.builder().put(indexMetadata, false).build(); RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); @@ -94,11 +94,11 @@ private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata return ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metadata(metadata) .routingTable(routingTable) - .nodes(generateDiscoveryNodes(masterEligible)) + .nodes(generateDiscoveryNodes(clusterManagerEligible)) .build(); } - private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, boolean masterEligible) { + private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) { AllocationService strategy = createAllocationService( Settings.builder() .put("cluster.routing.allocation.node_concurrent_recoveries", 100) @@ -108,7 +108,7 @@ private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, .build() ); - ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetadata, masterEligible); + ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetadata, clusterManagerEligible); RoutingTable routingTable = strategy.reroute(oldClusterState, "reroute").routingTable(); Metadata metadataNewClusterState = Metadata.builder().put(oldClusterState.metadata().index("test"), false).build(); @@ -120,8 +120,8 @@ private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, .build(); } - private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata indexMetadata, boolean masterEligible) { - ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, masterEligible); + private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) { + ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible); Metadata metadataNewClusterState = Metadata.builder() .put( @@ -142,8 +142,12 @@ private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata inde .build(); } - private ClusterState clusterStateWithReplicatedClosedIndex(IndexMetadata indexMetadata, boolean masterEligible, boolean assigned) { - ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, masterEligible); + private ClusterState clusterStateWithReplicatedClosedIndex( + IndexMetadata 
indexMetadata, + boolean clusterManagerEligible, + boolean assigned + ) { + ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible); Metadata metadataNewClusterState = Metadata.builder() .put( @@ -178,20 +182,20 @@ private ClusterState clusterStateWithReplicatedClosedIndex(IndexMetadata indexMe .build(); } - private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { + private DiscoveryNodes.Builder generateDiscoveryNodes(boolean clusterManagerEligible) { Set dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE); return DiscoveryNodes.builder() - .add(newNode("node1", masterEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles)) - .add(newNode("master_node", CLUSTER_MANAGER_DATA_ROLES)) + .add(newNode("node1", clusterManagerEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles)) + .add(newNode("cluster_manager_node", CLUSTER_MANAGER_DATA_ROLES)) .localNodeId("node1") - .masterNodeId(masterEligible ? "node1" : "master_node"); + .masterNodeId(clusterManagerEligible ? "node1" : "cluster_manager_node"); } private IndexMetadata createIndexMetadata(String name) { return IndexMetadata.builder(name).settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2).build(); } - public void testGetRelevantIndicesWithUnassignedShardsOnMasterEligibleNode() { + public void testGetRelevantIndicesWithUnassignedShardsOnClusterManagerEligibleNode() { IndexMetadata indexMetadata = createIndexMetadata("test"); Set indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithUnassignedIndex(indexMetadata, true)); assertThat(indices.size(), equalTo(0)); @@ -205,8 +209,10 @@ public void testGetRelevantIndicesWithUnassignedShardsOnDataOnlyNode() { public void testGetRelevantIndicesWithAssignedShards() { IndexMetadata indexMetadata = createIndexMetadata("test"); - boolean masterEligible = randomBoolean(); - Set indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithAssignedIndex(indexMetadata, masterEligible)); + boolean clusterManagerEligible = randomBoolean(); + Set indices = IncrementalClusterStateWriter.getRelevantIndices( + clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible) + ); assertThat(indices.size(), equalTo(1)); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 8fe8a13de9910..8fd8449108333 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -160,7 +160,7 @@ public void testGlobalCheckpointUpdate() { // now insert an unknown active/insync id , the checkpoint shouldn't change but a refresh should be requested. final AllocationId extraId = AllocationId.newInitializing(); - // first check that adding it without the master blessing doesn't change anything. + // first check that adding it without the cluster-manager blessing doesn't change anything. 
updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); assertNull(tracker.checkpoints.get(extraId.getId())); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); @@ -292,7 +292,7 @@ public void testMissingInSyncIdsPreventAdvance() { assertThat(updatedGlobalCheckpoint.get(), not(equalTo(UNASSIGNED_SEQ_NO))); } - public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { + public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManager() { final Map active = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); @@ -313,7 +313,7 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { assertThat(tracker.getGlobalCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO))); } - public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { + public void testInSyncIdsAreRemovedIfNotValidatedByClusterManager() { final long initialClusterStateVersion = randomNonNegativeLong(); final Map activeToStay = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5); @@ -421,7 +421,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { assertTrue(complete.get()); assertTrue(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId()).inSync); } else { - // master changes its mind and cancels the allocation + // cluster-manager changes its mind and cancels the allocation tracker.updateFromMaster( clusterStateVersion + 1, Collections.singleton(inSyncAllocationId.getId()), @@ -492,7 +492,7 @@ public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBar thread.join(); } - public void testUpdateAllocationIdsFromMaster() throws Exception { + public void testUpdateAllocationIdsFromClusterManager() throws Exception { final long initialClusterStateVersion = randomNonNegativeLong(); final int numberOfActiveAllocationsIds = randomIntBetween(2, 16); final int numberOfInitializingIds = randomIntBetween(2, 16); @@ -645,7 +645,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); /* - * The new in-sync allocation ID is in the in-sync set now yet the master does not know this; the allocation ID should still be in + * The new in-sync allocation ID is in the in-sync set now yet the cluster-manager does not know this; the allocation ID should still be in * the in-sync set even if we receive a cluster state update that does not reflect this. 
* */ diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index da984084321e1..e481384c3d6f3 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -438,7 +438,7 @@ public void testDanglingIndicesWithLaterVersion() throws Exception { final ClusterService clusterService = getInstanceFromNode(ClusterService.class); final ClusterState originalState = clusterService.state(); - // import an index with minor version incremented by one over cluster master version, it should be ignored + // import an index with minor version incremented by one over cluster cluster-manager version, it should be ignored final LocalAllocateDangledIndices dangling = getInstanceFromNode(LocalAllocateDangledIndices.class); final Settings idxSettingsLater = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(Version.CURRENT.id + 10000)) diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index a7d9ba0bf3d4b..8139ceec4611f 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -470,7 +470,12 @@ private , Response extends ActionResp ) { return executeClusterStateUpdateTask(clusterState, () -> { try { - TransportMasterNodeActionUtils.runMasterOperation(masterNodeAction, request, clusterState, new PlainActionFuture<>()); + TransportMasterNodeActionUtils.runClusterManagerOperation( + masterNodeAction, + request, + clusterState, + new PlainActionFuture<>() + ); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index cd3fee60014a7..d38d31f3ef43b 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -134,7 +134,7 @@ public void testRandomClusterStateUpdates() { } } - // apply cluster state to nodes (incl. master) + // apply cluster state to nodes (incl. 
cluster-manager) for (DiscoveryNode node : state.nodes()) { IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node); ClusterState localState = adaptClusterStateToLocalNode(state, node); @@ -328,7 +328,7 @@ public ClusterState randomInitialClusterState( Supplier indicesServiceSupplier ) { List<DiscoveryNode> allNodes = new ArrayList<>(); - DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager allNodes.add(localNode); // at least two nodes that have the data role so that we can allocate shards allNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE)); @@ -368,20 +368,20 @@ public ClusterState randomlyUpdateClusterState( Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap, Supplier indicesServiceSupplier ) { - // randomly remove no_master blocks + // randomly remove no_cluster_manager blocks if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) { state = ClusterState.builder(state) .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .build(); } - // randomly add no_master blocks + // randomly add no_cluster_manager blocks if (rarely() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false) { ClusterBlock block = randomBoolean() ? NoMasterBlockService.NO_MASTER_BLOCK_ALL : NoMasterBlockService.NO_MASTER_BLOCK_WRITES; state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build(); } - // if no_master block is in place, make no other cluster state changes + // if no_cluster_manager block is in place, make no other cluster state changes if (state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) { return state; } @@ -481,7 +481,7 @@ public ClusterState randomlyUpdateClusterState( state = cluster.applyFailedShards(state, failedShards); state = cluster.applyStartedShards(state, startedShards); - // randomly add and remove nodes (except current master) + // randomly add and remove nodes (except current cluster-manager) if (rarely()) { if (randomBoolean()) { // add node @@ -506,7 +506,7 @@ public ClusterState randomlyUpdateClusterState( } } - // TODO: go masterless? + // TODO: go cluster-manager-less?
return state; } diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java index bb8f0405ecf7e..40ffa2eeb0aff 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java @@ -494,7 +494,7 @@ public void testPeriodicRecheck() throws Exception { }); } - public void testPeriodicRecheckOffMaster() { + public void testPeriodicRecheckOffClusterManager() { ClusterState initialState = initialState(); ClusterState.Builder builder = ClusterState.builder(initialState); PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( @@ -528,20 +528,20 @@ public void testPeriodicRecheckOffMaster() { assertThat(tasksInProgress.tasks().size(), equalTo(1)); } - // The rechecker should recheck indefinitely on the master node as the + // The rechecker should recheck indefinitely on the cluster-manager node as the // task can never be assigned while nonClusterStateCondition = false assertTrue(service.getPeriodicRechecker().isScheduled()); - // Now simulate the node ceasing to be the master + // Now simulate the node ceasing to be the cluster-manager builder = ClusterState.builder(clusterState); nodes = DiscoveryNodes.builder(clusterState.nodes()); - nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_master_node")); - nodes.masterNodeId("a_new_master_node"); + nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_cluster_manager_node")); + nodes.masterNodeId("a_new_cluster_manager_node"); ClusterState nonMasterClusterState = builder.nodes(nodes).build(); event = new ClusterChangedEvent("test", nonMasterClusterState, clusterState); service.clusterChanged(event); - // The service should have cancelled the rechecker on learning it is no longer running on the master node + // The service should have cancelled the rechecker on learning it is no longer running on the cluster-manager node assertFalse(service.getPeriodicRechecker().isScheduled()); } @@ -796,7 +796,7 @@ private ClusterState insignificantChange(ClusterState clusterState) { } } if (randomBoolean()) { - // remove a node that doesn't have any tasks assigned to it and it's not the master node + // remove a node that doesn't have any tasks assigned to it and it's not the cluster-manager node for (DiscoveryNode node : clusterState.nodes()) { if (hasTasksAssignedTo(tasks, node.getId()) == false && "this_node".equals(node.getId()) == false) { logger.info("removed unassigned node {}", node.getId()); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java index 4d35098309b0d..fb633f9fa4a9c 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java @@ -54,11 +54,11 @@ public class RestNodesInfoActionTests extends OpenSearchTestCase { public void testDuplicatedFiltersAreNotRemoved() { Map params = new HashMap<>(); - params.put("nodeId", "_all,master:false,_all"); + params.put("nodeId", "_all,cluster_manager:false,_all"); RestRequest restRequest = buildRestRequest(params); NodesInfoRequest actual = 
RestNodesInfoAction.prepareRequest(restRequest); - assertArrayEquals(new String[] { "_all", "master:false", "_all" }, actual.nodesIds()); + assertArrayEquals(new String[] { "_all", "cluster_manager:false", "_all" }, actual.nodesIds()); } public void testOnlyMetrics() { diff --git a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java index 6c789ae6d98cd..f0e283e6dde6d 100644 --- a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java @@ -290,7 +290,7 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In assertThat("Expecting all snapshot shard size fetches to execute a Reroute", reroutes.get(), equalTo(maxShardsToCreate)); } - public void testNoLongerMaster() throws Exception { + public void testNoLongerClusterManager() throws Exception { final InternalSnapshotsInfoService snapshotsInfoService = new InternalSnapshotsInfoService( Settings.EMPTY, clusterService, @@ -310,18 +310,18 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int nbShards = randomIntBetween(1, 5); applyClusterState( - "restore-indices-when-master-" + indexName, + "restore-indices-when-cluster-manager-" + indexName, clusterState -> addUnassignedShards(clusterState, indexName, nbShards) ); } - applyClusterState("demote-current-master", this::demoteMasterNode); + applyClusterState("demote-current-cluster-manager", this::demoteClusterManagerNode); for (int i = 0; i < randomIntBetween(1, 10); i++) { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int nbShards = randomIntBetween(1, 5); applyClusterState( - "restore-indices-when-no-longer-master-" + indexName, + "restore-indices-when-no-longer-cluster-manager-" + indexName, clusterState -> addUnassignedShards(clusterState, indexName, nbShards) ); } @@ -484,7 +484,7 @@ private ClusterState addUnassignedShards(final ClusterState currentState, String .build(); } - private ClusterState demoteMasterNode(final ClusterState currentState) { + private ClusterState demoteClusterManagerNode(final ClusterState currentState) { final DiscoveryNode node = new DiscoveryNode( "other", OpenSearchTestCase.buildNewFakeTransportAddress(), diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index ab9a455399366..68a6af25a7c82 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -306,7 +306,7 @@ public void verifyReposThenStopServices() { blobStoreContext.forceConsistent(); } BlobStoreTestUtil.assertConsistency( - (BlobStoreRepository) testClusterNodes.randomMasterNodeSafe().repositoriesService.repository("repo"), + (BlobStoreRepository) testClusterNodes.randomClusterManagerNodeSafe().repositoriesService.repository("repo"), Runnable::run ); } finally { @@ -323,7 +323,7 @@ public void testSuccessfulSnapshotAndRestore() { final int shards = randomIntBetween(1, 10); final int documents = randomIntBetween(0, 100); - final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + final TestClusterNodes.TestClusterNode clusterManagerNode = 
testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -389,9 +389,9 @@ public void testSuccessfulSnapshotAndRestore() { assertNotNull(createSnapshotResponseListener.result()); assertNotNull(restoreSnapshotResponseListener.result()); assertTrue(documentCountVerified.get()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -404,8 +404,8 @@ public void testSuccessfulSnapshotAndRestore() { public void testSnapshotWithNodeDisconnects() { final int dataNodes = randomIntBetween(2, 10); - final int masterNodes = randomFrom(1, 3, 5); - setupTestCluster(masterNodes, dataNodes); + final int clusterManagerNodes = randomFrom(1, 3, 5); + setupTestCluster(clusterManagerNodes, dataNodes); String repoName = "repo"; String snapshotName = "snapshot"; @@ -422,7 +422,7 @@ public void testSnapshotWithNodeDisconnects() { if (randomBoolean()) { scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); } - testClusterNodes.randomMasterNodeSafe().client.admin() + testClusterNodes.randomClusterManagerNodeSafe().client.admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) .setPartial(partial) @@ -435,12 +435,12 @@ public void testSnapshotWithNodeDisconnects() { for (int i = 0; i < randomIntBetween(0, dataNodes); ++i) { scheduleNow(this::disconnectOrRestartDataNode); } - // Only disconnect master if we have more than a single master and can simulate a failover - final boolean disconnectedMaster = randomBoolean() && masterNodes > 1; - if (disconnectedMaster) { - scheduleNow(this::disconnectOrRestartMasterNode); + // Only disconnect cluster-manager if we have more than a single cluster-manager and can simulate a failover + final boolean disconnectedClusterManager = randomBoolean() && clusterManagerNodes > 1; + if (disconnectedClusterManager) { + scheduleNow(this::disconnectOrRestartClusterManagerNode); } - if (disconnectedMaster || randomBoolean()) { + if (disconnectedClusterManager || randomBoolean()) { scheduleSoon(() -> testClusterNodes.clearNetworkDisruptions()); } else if (randomBoolean()) { scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); @@ -456,22 +456,22 @@ public void testSnapshotWithNodeDisconnects() { } }); - runUntil(() -> testClusterNodes.randomMasterNode().map(master -> { + runUntil(() -> testClusterNodes.randomClusterManagerNode().map(clusterManager -> { if (snapshotNeverStarted.get()) { return true; } - final SnapshotsInProgress snapshotsInProgress = master.clusterService.state().custom(SnapshotsInProgress.TYPE); + final SnapshotsInProgress snapshotsInProgress = clusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE); return snapshotsInProgress != null && snapshotsInProgress.entries().isEmpty(); }).orElse(false), TimeUnit.MINUTES.toMillis(1L)); clearDisruptionsAndAwaitSync(); - final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode() - .orElseThrow(() -> new 
AssertionError("expected to find at least one active master node")); - SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state() + final TestClusterNodes.TestClusterNode randomClusterManager = testClusterNodes.randomClusterManagerNode() + .orElseThrow(() -> new AssertionError("expected to find at least one active cluster-manager node")); + SnapshotsInProgress finalSnapshotsInProgress = randomClusterManager.clusterService.state() .custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); assertThat(finalSnapshotsInProgress.entries(), empty()); - final Repository repository = randomMaster.repositoriesService.repository(repoName); + final Repository repository = randomClusterManager.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); if (snapshotNeverStarted.get()) { assertThat(snapshotIds, empty()); @@ -480,10 +480,10 @@ public void testSnapshotWithNodeDisconnects() { } } - public void testSnapshotDeleteWithMasterFailover() { + public void testSnapshotDeleteWithClusterManagerFailover() { final int dataNodes = randomIntBetween(2, 10); - final int masterNodes = randomFrom(3, 5); - setupTestCluster(masterNodes, dataNodes); + final int clusterManagerNodes = randomFrom(3, 5); + setupTestCluster(clusterManagerNodes, dataNodes); String repoName = "repo"; String snapshotName = "snapshot"; @@ -494,7 +494,7 @@ public void testSnapshotDeleteWithMasterFailover() { final StepListener createSnapshotResponseStepListener = new StepListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), - createIndexResponse -> testClusterNodes.randomMasterNodeSafe().client.admin() + createIndexResponse -> testClusterNodes.randomClusterManagerNodeSafe().client.admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(waitForSnapshot) @@ -503,7 +503,7 @@ public void testSnapshotDeleteWithMasterFailover() { final AtomicBoolean snapshotDeleteResponded = new AtomicBoolean(false); continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> { - scheduleNow(this::disconnectOrRestartMasterNode); + scheduleNow(this::disconnectOrRestartClusterManagerNode); testClusterNodes.randomDataNodeSafe().client.admin() .cluster() .prepareDeleteSnapshot(repoName, snapshotName) @@ -511,10 +511,10 @@ public void testSnapshotDeleteWithMasterFailover() { }); runUntil( - () -> testClusterNodes.randomMasterNode() + () -> testClusterNodes.randomClusterManagerNode() .map( - master -> snapshotDeleteResponded.get() - && master.clusterService.state() + clusterManager -> snapshotDeleteResponded.get() + && clusterManager.clusterService.state() .custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY) .getEntries() .isEmpty() @@ -525,11 +525,11 @@ public void testSnapshotDeleteWithMasterFailover() { clearDisruptionsAndAwaitSync(); - final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode() - .orElseThrow(() -> new AssertionError("expected to find at least one active master node")); - SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state().custom(SnapshotsInProgress.TYPE); + final TestClusterNodes.TestClusterNode randomClusterManager = testClusterNodes.randomClusterManagerNode() + .orElseThrow(() -> new AssertionError("expected to find at least one active cluster-manager node")); + SnapshotsInProgress finalSnapshotsInProgress = randomClusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE); 
assertThat(finalSnapshotsInProgress.entries(), empty()); - final Repository repository = randomMaster.repositoriesService.repository(repoName); + final Repository repository = randomClusterManager.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(0)); } @@ -542,7 +542,7 @@ public void testConcurrentSnapshotCreateAndDelete() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -558,12 +558,12 @@ public void testConcurrentSnapshotCreateAndDelete() { final StepListener deleteSnapshotStepListener = new StepListener<>(); - masterNode.clusterService.addListener(new ClusterStateListener() { + clusterManagerNode.clusterService.addListener(new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty() == false) { client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener); - masterNode.clusterService.removeListener(this); + clusterManagerNode.clusterService.removeListener(this); } } }); @@ -587,9 +587,9 @@ public void clusterChanged(ClusterChangedEvent event) { assertNotNull(createSnapshotResponseStepListener.result()); assertNotNull(createAnotherSnapshotResponseStepListener.result()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -608,7 +608,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -659,9 +659,9 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); // We end up with two snapshots no matter if the delete worked out or not assertThat(snapshotIds, hasSize(2)); @@ -683,7 +683,7 @@ 
public void testBulkSnapshotDeleteWithAbort() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -722,9 +722,9 @@ public void testBulkSnapshotDeleteWithAbort() { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); // No snapshots should be left in the repository assertThat(snapshotIds, empty()); @@ -738,7 +738,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -812,7 +812,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { assertThat(deleteSnapshotStepListener.result().isAcknowledged(), is(true)); assertThat(restoreSnapshotResponseListener.result().getRestoreInfo().failedShards(), is(0)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, contains(createOtherSnapshotResponseStepListener.result().getSnapshotInfo().snapshotId())); @@ -850,7 +850,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { String snapshotName = "snapshot"; final String index = "test"; - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -859,7 +859,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { final SetOnce firstIndex = new SetOnce<>(); continueOrDie(createRepoAndIndex(repoName, index, 1), createIndexResponse -> { - firstIndex.set(masterNode.clusterService.state().metadata().index(index).getIndex()); + firstIndex.set(clusterManagerNode.clusterService.state().metadata().index(index).getIndex()); // create a few more indices to make it more likely that the subsequent index delete operation happens before snapshot // finalization final GroupedActionListener listener = new GroupedActionListener<>(createIndicesListener, indices); @@ -907,9 +907,9 @@ public void onFailure(Exception e) { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = 
clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); final RepositoryData repositoryData = getRepositoryData(repository); Collection snapshotIds = repositoryData.getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -944,7 +944,7 @@ public void testConcurrentDeletes() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -990,9 +990,10 @@ public void testConcurrentDeletes() { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotDeletionsInProgress deletionsInProgress = masterNode.clusterService.state().custom(SnapshotDeletionsInProgress.TYPE); + SnapshotDeletionsInProgress deletionsInProgress = clusterManagerNode.clusterService.state() + .custom(SnapshotDeletionsInProgress.TYPE); assertFalse(deletionsInProgress.hasDeletionsInProgress()); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); final RepositoryData repositoryData = getRepositoryData(repository); Collection snapshotIds = repositoryData.getSnapshotIds(); // We end up with no snapshots since at least one of the deletes worked out @@ -1003,12 +1004,12 @@ public void testConcurrentDeletes() { } /** - * Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently + * Simulates concurrent restarts of data and cluster-manager nodes as well as relocating a primary shard, while starting and subsequently * deleting a snapshot. 
*/ public void testSnapshotPrimaryRelocations() { - final int masterNodeCount = randomFrom(1, 3, 5); - setupTestCluster(masterNodeCount, randomIntBetween(2, 5)); + final int clusterManagerNodeCount = randomFrom(1, 3, 5); + setupTestCluster(clusterManagerNodeCount, randomIntBetween(2, 5)); String repoName = "repo"; String snapshotName = "snapshot"; @@ -1016,11 +1017,11 @@ public void testSnapshotPrimaryRelocations() { final int shards = randomIntBetween(1, 5); - final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); final AtomicBoolean createdSnapshot = new AtomicBoolean(); - final AdminClient masterAdminClient = masterNode.client.admin(); + final AdminClient clusterManagerAdminClient = clusterManagerNode.client.admin(); final StepListener clusterStateResponseStepListener = new StepListener<>(); @@ -1038,15 +1039,15 @@ public void testSnapshotPrimaryRelocations() { @Override public void run() { final StepListener updatedClusterStateResponseStepListener = new StepListener<>(); - masterAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener); + clusterManagerAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener); continueOrDie(updatedClusterStateResponseStepListener, updatedClusterState -> { final ShardRouting shardRouting = updatedClusterState.getState() .routingTable() .shardRoutingTable(shardToRelocate.shardId()) .primaryShard(); if (shardRouting.unassigned() && shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.NODE_LEFT) { - if (masterNodeCount > 1) { - scheduleNow(() -> testClusterNodes.stopNode(masterNode)); + if (clusterManagerNodeCount > 1) { + scheduleNow(() -> testClusterNodes.stopNode(clusterManagerNode)); } testClusterNodes.randomDataNodeSafe().client.admin() .cluster() @@ -1058,7 +1059,7 @@ public void run() { .deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName), noopListener()); })); scheduleNow( - () -> testClusterNodes.randomMasterNodeSafe().client.admin() + () -> testClusterNodes.randomClusterManagerNodeSafe().client.admin() .cluster() .reroute( new ClusterRerouteRequest().add( @@ -1080,11 +1081,11 @@ public void run() { }); }); - runUntil(() -> testClusterNodes.randomMasterNode().map(master -> { + runUntil(() -> testClusterNodes.randomClusterManagerNode().map(clusterManager -> { if (createdSnapshot.get() == false) { return false; } - return master.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty(); + return clusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty(); }).orElse(false), TimeUnit.MINUTES.toMillis(1L)); clearDisruptionsAndAwaitSync(); @@ -1096,7 +1097,7 @@ public void run() { .entries(), empty() ); - final Repository repository = testClusterNodes.randomMasterNodeSafe().repositoriesService.repository(repoName); + final Repository repository = testClusterNodes.randomClusterManagerNodeSafe().repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0))); } @@ -1110,7 +1111,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { final int shards = randomIntBetween(1, 10); final int documents = randomIntBetween(2, 
100); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -1171,7 +1172,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { "Documents were restored but the restored index mapping was older than some documents and misses some of their fields", (int) hitCount, lessThanOrEqualTo( - ((Map) masterNode.clusterService.state() + ((Map) clusterManagerNode.clusterService.state() .metadata() .index(restoredIndex) .mapping() @@ -1186,9 +1187,9 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { assertNotNull(createSnapshotResponseStepListener.result()); assertNotNull(restoreSnapshotResponseStepListener.result()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -1210,7 +1211,7 @@ public void testRunConcurrentSnapshots() { final int shards = randomIntBetween(1, 10); final int documents = randomIntBetween(1, 100); - final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -1253,9 +1254,9 @@ public void testRunConcurrentSnapshots() { }); runUntil(() -> doneIndexing.get() && doneSnapshotting.get(), TimeUnit.MINUTES.toMillis(5L)); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(snapshotNames.size())); @@ -1314,12 +1315,12 @@ private void disconnectOrRestartDataNode() { } } - private void disconnectOrRestartMasterNode() { - testClusterNodes.randomMasterNode().ifPresent(masterNode -> { + private void disconnectOrRestartClusterManagerNode() { + testClusterNodes.randomClusterManagerNode().ifPresent(clusterManagerNode -> { if (randomBoolean()) { - testClusterNodes.disconnectNode(masterNode); + testClusterNodes.disconnectNode(clusterManagerNode); } else { - masterNode.restart(); + clusterManagerNode.restart(); } }); } @@ -1374,12 +1375,15 @@ private void stabilize() { .stream() .map(node -> node.clusterService.state()) .collect(Collectors.toList()); - final Set masterNodeIds = clusterStates.stream() + final Set clusterManagerNodeIds = clusterStates.stream() .map(clusterState -> 
clusterState.nodes().getMasterNodeId()) .collect(Collectors.toSet()); final Set terms = clusterStates.stream().map(ClusterState::term).collect(Collectors.toSet()); final List versions = clusterStates.stream().map(ClusterState::version).distinct().collect(Collectors.toList()); - return versions.size() == 1 && masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false && terms.size() == 1; + return versions.size() == 1 + && clusterManagerNodeIds.size() == 1 + && clusterManagerNodeIds.contains(null) == false + && terms.size() == 1; }, TimeUnit.MINUTES.toMillis(1L)); } @@ -1395,8 +1399,8 @@ private void runUntil(Supplier fulfilled, long timeout) { fail("Condition wasn't fulfilled."); } - private void setupTestCluster(int masterNodes, int dataNodes) { - testClusterNodes = new TestClusterNodes(masterNodes, dataNodes); + private void setupTestCluster(int clusterManagerNodes, int dataNodes) { + testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes); startCluster(); } @@ -1472,11 +1476,11 @@ private final class TestClusterNodes { */ private final Set disconnectedNodes = new HashSet<>(); - TestClusterNodes(int masterNodes, int dataNodes) { - for (int i = 0; i < masterNodes; ++i) { + TestClusterNodes(int clusterManagerNodes, int dataNodes) { + for (int i = 0; i < clusterManagerNodes; ++i) { nodes.computeIfAbsent("node" + i, nodeName -> { try { - return newMasterNode(nodeName); + return newClusterManagerNode(nodeName); } catch (IOException e) { throw new AssertionError(e); } @@ -1501,7 +1505,7 @@ public TestClusterNode nodeById(final String nodeId) { .orElseThrow(() -> new AssertionError("Could not find node by id [" + nodeId + ']')); } - private TestClusterNode newMasterNode(String nodeName) throws IOException { + private TestClusterNode newClusterManagerNode(String nodeName) throws IOException { return newNode(nodeName, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } @@ -1522,19 +1526,21 @@ private TestClusterNode newNode(String nodeName, DiscoveryNodeRole role) throws ); } - public TestClusterNode randomMasterNodeSafe() { - return randomMasterNode().orElseThrow(() -> new AssertionError("Expected to find at least one connected master node")); + public TestClusterNode randomClusterManagerNodeSafe() { + return randomClusterManagerNode().orElseThrow( + () -> new AssertionError("Expected to find at least one connected cluster-manager node") + ); } - public Optional randomMasterNode() { + public Optional randomClusterManagerNode() { // Select from sorted list of data-nodes here to not have deterministic behaviour - final List masterNodes = testClusterNodes.nodes.values() + final List clusterManagerNodes = testClusterNodes.nodes.values() .stream() .filter(n -> n.node.isMasterNode()) .filter(n -> disconnectedNodes.contains(n.node.getName()) == false) .sorted(Comparator.comparing(n -> n.node.getName())) .collect(Collectors.toList()); - return masterNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(masterNodes)); + return clusterManagerNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(clusterManagerNodes)); } public void stopNode(TestClusterNode node) { @@ -1596,15 +1602,15 @@ public DiscoveryNodes discoveryNodes() { } /** - * Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}. + * Returns the {@link TestClusterNode} for the cluster-manager node in the given {@link ClusterState}. 
* @param state ClusterState - * @return Master Node + * @return Cluster Manager Node */ - public TestClusterNode currentMaster(ClusterState state) { - TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName()); - assertNotNull(master); - assertTrue(master.node.isMasterNode()); - return master; + public TestClusterNode currentClusterManager(ClusterState state) { + TestClusterNode clusterManager = nodes.get(state.nodes().getMasterNode().getName()); + assertNotNull(clusterManager); + assertTrue(clusterManager.node.isMasterNode()); + return clusterManager; } private final class TestClusterNode { @@ -1636,7 +1642,7 @@ private final class TestClusterNode { private final DiscoveryNode node; - private final MasterService masterService; + private final MasterService clusterManagerService; private final AllocationService allocationService; @@ -1656,13 +1662,18 @@ private final class TestClusterNode { this.node = node; final Environment environment = createEnvironment(node.getName()); threadPool = deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)); - masterService = new FakeThreadPoolMasterService(node.getName(), "test", threadPool, deterministicTaskQueue::scheduleNow); + clusterManagerService = new FakeThreadPoolMasterService( + node.getName(), + "test", + threadPool, + deterministicTaskQueue::scheduleNow + ); final Settings settings = environment.settings(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = new ClusterService( settings, clusterSettings, - masterService, + clusterManagerService, new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { @@ -2192,7 +2203,7 @@ public void start(ClusterState initialState) { transportService, namedWriteableRegistry, allocationService, - masterService, + clusterManagerService, () -> persistedState, hostsResolver -> nodes.values() .stream() @@ -2206,7 +2217,7 @@ public void start(ClusterState initialState) { ElectionStrategy.DEFAULT_INSTANCE, () -> new StatusInfo(HEALTHY, "healthy-info") ); - masterService.setClusterStatePublisher(coordinator); + clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); clusterService.getClusterApplierService().setNodeConnectionsService(nodeConnectionsService); nodeConnectionsService.start(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java index 16fc7467d099b..7f96d4842e37d 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java @@ -371,11 +371,11 @@ public void testCompletedCloneStartsNextClone() throws Exception { final String indexName1 = "index-1"; final IndexId indexId1 = indexId(indexName1); final RepositoryShardId shardId1 = new RepositoryShardId(indexId1, 0); - final String masterNodeId = uuid(); + final String clusterManagerNodeId = uuid(); final SnapshotsInProgress.Entry cloneSingleShard = cloneEntry( targetSnapshot, sourceSnapshot.getSnapshotId(), - clonesMap(shardId1, initShardStatus(masterNodeId)) + clonesMap(shardId1, initShardStatus(clusterManagerNodeId)) ); final Snapshot queuedTargetSnapshot = snapshot(repoName, "test-snapshot"); @@ -388,11 +388,11 @@ public void testCompletedCloneStartsNextClone() throws Exception 
{ assertThat(cloneSingleShard.state(), is(SnapshotsInProgress.State.STARTED)); final ClusterState stateWithUnassignedRoutingShard = stateWithSnapshots( - ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoveryNodes(masterNodeId)).build(), + ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoveryNodes(clusterManagerNodeId)).build(), cloneSingleShard, queuedClone ); - final SnapshotsService.ShardSnapshotUpdate completeShardClone = successUpdate(targetSnapshot, shardId1, masterNodeId); + final SnapshotsService.ShardSnapshotUpdate completeShardClone = successUpdate(targetSnapshot, shardId1, clusterManagerNodeId); final ClusterState updatedClusterState = applyUpdates(stateWithUnassignedRoutingShard, completeShardClone); final SnapshotsInProgress snapshotsInProgress = updatedClusterState.custom(SnapshotsInProgress.TYPE); diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index 6bfd96b328d75..8d43db15053f1 100644 --- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -373,7 +373,7 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b new BytesArray(data) ); // If the existing snapshotInfo differs only in the timestamps it stores, then the overwrite is not - // a problem and could be the result of a correctly handled master failover. + // a problem and could be the result of a correctly handled cluster-manager failover. final SnapshotInfo existingInfo = SNAPSHOT_FORMAT.deserialize( blobName, namedXContentRegistry, diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java index 0092763b4ba20..9bb8b79377939 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java @@ -553,11 +553,11 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { final Settings settings = Settings.EMPTY; final List knownNodes = new CopyOnWriteArrayList<>(); final Settings data = nonMasterNode(); - final Settings dedicatedMaster = clusterManagerOnlyNode(); + final Settings dedicatedClusterManager = clusterManagerOnlyNode(); try ( - MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster); + MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager); MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data); - MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedMaster); + MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager); MockTransportService c2N2 = startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, data) ) { final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); diff --git a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java index 1714f154036a5..409bc327cb095 100644 --- a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java 
+++ b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java @@ -743,24 +743,24 @@ public void testGetNodePredicateNodeRoles() { assertTrue(nodePredicate.test(all)); } { - DiscoveryNode dataMaster = new DiscoveryNode( + DiscoveryNode dataClusterManager = new DiscoveryNode( "id", address, Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)), Version.CURRENT ); - assertTrue(nodePredicate.test(dataMaster)); + assertTrue(nodePredicate.test(dataClusterManager)); } { - DiscoveryNode dedicatedMaster = new DiscoveryNode( + DiscoveryNode dedicatedClusterManager = new DiscoveryNode( "id", address, Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)), Version.CURRENT ); - assertFalse(nodePredicate.test(dedicatedMaster)); + assertFalse(nodePredicate.test(dedicatedClusterManager)); } { DiscoveryNode dedicatedIngest = new DiscoveryNode( @@ -773,14 +773,14 @@ public void testGetNodePredicateNodeRoles() { assertTrue(nodePredicate.test(dedicatedIngest)); } { - DiscoveryNode masterIngest = new DiscoveryNode( + DiscoveryNode clusterManagerIngest = new DiscoveryNode( "id", address, Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNodeRole.INGEST_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)), Version.CURRENT ); - assertTrue(nodePredicate.test(masterIngest)); + assertTrue(nodePredicate.test(clusterManagerIngest)); } { DiscoveryNode dedicatedData = new DiscoveryNode( @@ -855,14 +855,14 @@ public void testGetNodePredicatesCombination() { TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0); Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build(); Predicate nodePredicate = SniffConnectionStrategy.getNodePredicate(settings); - Set dedicatedMasterRoles = new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); + Set dedicatedClusterManagerRoles = new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); Set allRoles = DiscoveryNodeRole.BUILT_IN_ROLES; { DiscoveryNode node = new DiscoveryNode( "id", address, Collections.singletonMap("gateway", "true"), - dedicatedMasterRoles, + dedicatedClusterManagerRoles, Version.CURRENT ); assertFalse(nodePredicate.test(node)); @@ -872,7 +872,7 @@ public void testGetNodePredicatesCombination() { "id", address, Collections.singletonMap("gateway", "false"), - dedicatedMasterRoles, + dedicatedClusterManagerRoles, Version.CURRENT ); assertFalse(nodePredicate.test(node)); @@ -882,7 +882,7 @@ public void testGetNodePredicatesCombination() { "id", address, Collections.singletonMap("gateway", "false"), - dedicatedMasterRoles, + dedicatedClusterManagerRoles, Version.CURRENT ); assertFalse(nodePredicate.test(node)); From 1e6dd560dfa00a86b350d9615810fd85ab84fa38 Mon Sep 17 00:00:00 2001 From: Ankit Jain Date: Wed, 25 May 2022 22:09:25 -0700 Subject: [PATCH 03/34] Removing unused method from TransportSearchAction (#3437) * Removing unused method from TransportSearchAction Signed-off-by: Ankit Jain --- .../action/search/TransportSearchAction.java | 76 ------------------- 1 file changed, 76 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 1ca477942cdf6..ebb0f21d6fe16 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ 
b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -65,7 +65,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.index.Index; import org.opensearch.index.query.Rewriteable; @@ -298,81 +297,6 @@ void executeOnShardTarget( ); } - public void executeRequest( - Task task, - SearchRequest searchRequest, - String actionName, - boolean includeSearchContext, - SinglePhaseSearchAction phaseSearchAction, - ActionListener listener - ) { - executeRequest(task, searchRequest, new SearchAsyncActionProvider() { - @Override - public AbstractSearchAsyncAction asyncSearchAction( - SearchTask task, - SearchRequest searchRequest, - Executor executor, - GroupShardsIterator shardsIts, - SearchTimeProvider timeProvider, - BiFunction connectionLookup, - ClusterState clusterState, - Map aliasFilter, - Map concreteIndexBoosts, - Map> indexRoutings, - ActionListener listener, - boolean preFilter, - ThreadPool threadPool, - SearchResponse.Clusters clusters - ) { - return new AbstractSearchAsyncAction( - actionName, - logger, - searchTransportService, - connectionLookup, - aliasFilter, - concreteIndexBoosts, - indexRoutings, - executor, - searchRequest, - listener, - shardsIts, - timeProvider, - clusterState, - task, - new ArraySearchPhaseResults<>(shardsIts.size()), - searchRequest.getMaxConcurrentShardRequests(), - clusters - ) { - @Override - protected void executePhaseOnShard( - SearchShardIterator shardIt, - SearchShardTarget shard, - SearchActionListener listener - ) { - final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); - phaseSearchAction.executeOnShardTarget(task, shard, connection, listener); - } - - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase(getName()) { - @Override - public void run() { - final AtomicArray atomicArray = results.getAtomicArray(); - sendSearchResponse(InternalSearchResponse.empty(), atomicArray); - } - }; - } - - @Override - boolean buildPointInTimeFromSearchResults() { - return includeSearchContext; - } - }; - } - }, listener); - } - private void executeRequest( Task task, SearchRequest searchRequest, From e6475255dea2345807e79b44a9f03e23bfe2a5d7 Mon Sep 17 00:00:00 2001 From: vpehkone <101240162+vpehkone@users.noreply.github.com> Date: Thu, 26 May 2022 08:05:52 -0700 Subject: [PATCH 04/34] Set term vector flags to false for ._index_prefix field (#1901). (#3119) * Set term vector flags to false for ._index_prefix field (#1901). Signed-off-by: Vesa Pehkonen * Replaced the FieldType copy ctor with ctor for the prefix field and replaced setting the field type parameters with setIndexOptions(). (#1901) Signed-off-by: Vesa Pehkonen * Added tests for term vectors. (#1901) Signed-off-by: Vesa Pehkonen * Fixed code formatting error. 
Signed-off-by: Vesa Pehkonen Co-authored-by: sdp --- .../mapper/SearchAsYouTypeFieldMapper.java | 4 +-- .../SearchAsYouTypeFieldMapperTests.java | 27 ++++++++++++++----- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 68b887c4c4a43..1b6aad0bda32a 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -205,8 +205,8 @@ public SearchAsYouTypeFieldMapper build(Mapper.BuilderContext context) { ft.setIndexAnalyzer(analyzers.getIndexAnalyzer()); // set up the prefix field - FieldType prefixft = new FieldType(fieldType); - prefixft.setStoreTermVectors(false); + FieldType prefixft = new FieldType(); + prefixft.setIndexOptions(fieldType.indexOptions()); prefixft.setOmitNorms(true); prefixft.setStored(false); final String fullName = buildFullName(context); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index 786791314692d..7c4b8956d9e3c 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -352,15 +352,30 @@ public void testIndex() throws IOException { } public void testTermVectors() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "search_as_you_type").field("term_vector", "yes"))); + String[] termVectors = { + "yes", + "with_positions", + "with_offsets", + "with_positions_offsets", + "with_positions_payloads", + "with_positions_offsets_payloads" }; + + for (String termVector : termVectors) { + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", "search_as_you_type").field("term_vector", termVector)) + ); - assertTrue(getRootFieldMapper(mapper, "field").fieldType().fieldType.storeTermVectors()); + assertTrue(getRootFieldMapper(mapper, "field").fieldType().fieldType.storeTermVectors()); - Stream.of(getShingleFieldMapper(mapper, "field._2gram"), getShingleFieldMapper(mapper, "field._3gram")) - .forEach(m -> assertTrue("for " + m.name(), m.fieldType.storeTermVectors())); + Stream.of(getShingleFieldMapper(mapper, "field._2gram"), getShingleFieldMapper(mapper, "field._3gram")) + .forEach(m -> assertTrue("for " + m.name(), m.fieldType.storeTermVectors())); - PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(mapper, "field._index_prefix"); - assertFalse(prefixFieldMapper.fieldType.storeTermVectors()); + PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(mapper, "field._index_prefix"); + assertFalse(prefixFieldMapper.fieldType.storeTermVectors()); + assertFalse(prefixFieldMapper.fieldType.storeTermVectorOffsets()); + assertFalse(prefixFieldMapper.fieldType.storeTermVectorPositions()); + assertFalse(prefixFieldMapper.fieldType.storeTermVectorPayloads()); + } } public void testNorms() throws IOException { From 1b93cf8924d2f6edcd80f9a6b5e0f3217fa21434 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 27 May 2022 08:01:50 -0400 Subject: [PATCH 05/34] [BUG] Fixing org.opensearch.monitor.os.OsProbeTests > 
testLogWarnCpuMessageOnlyOnes when cgroups are available but cgroup stats is not (#3448) Signed-off-by: Andriy Redko --- .../java/org/opensearch/monitor/os/OsProbeTests.java | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java index 505dce8879bdd..575ab02bd6f07 100644 --- a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java @@ -32,6 +32,8 @@ package org.opensearch.monitor.os; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.both; @@ -296,8 +298,12 @@ List readSysFsCgroupCpuAcctCpuStat(String controlGroup) throws IOExcepti } }; - assumeThat("CGroups are not available", noCpuStatsOsProbe.areCgroupStatsAvailable(), is(true)); - noCpuStatsOsProbe.osStats(); + assumeThat("CGroups are available", noCpuStatsOsProbe.areCgroupStatsAvailable(), is(true)); + OsStats osStats = noCpuStatsOsProbe.osStats(); + + // Depending on CGroups v1/v2, the cgroup stats may not be available + assumeThat("CGroup is available", osStats.getCgroup(), is(not(nullValue()))); + // no nr_throttled and throttled_time verify(logger, times(2)).warn(anyString()); reset(logger); From e9b19a095c2cf90274c41aa701432a2748c03468 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 27 May 2022 12:25:12 -0700 Subject: [PATCH 06/34] [Segment Replication] Add SegmentReplicationTargetService to orchestrate replication events. (#3439) * Add SegmentReplicationTargetService to orchestrate replication events. This change introduces boilerplate classes for Segment Replication and a target service to orchestrate replication events. It also includes two refactors of peer recovery components for reuse. 1. Rename RecoveryFileChunkRequest to FileChunkRequest and extract code to handle throttling into ReplicationTarget. 2. Extract a component to execute retryable requests over the transport layer. Signed-off-by: Marc Handalian * Code cleanup. Signed-off-by: Marc Handalian * Make the SegmentReplicationTargetService component final so that it cannot be extended by plugins.
Signed-off-by: Marc Handalian --- .../index/store/CorruptedFileIT.java | 8 +- .../org/opensearch/recovery/RelocationIT.java | 4 +- .../recovery/TruncatedRecoveryIT.java | 4 +- ...hunkRequest.java => FileChunkRequest.java} | 8 +- .../recovery/PeerRecoveryTargetService.java | 119 ++++-------- .../indices/recovery/RecoveryState.java | 1 + .../indices/recovery/RecoveryTarget.java | 3 +- .../recovery/RemoteRecoveryTargetHandler.java | 124 +++---------- .../recovery/RetryableTransportClient.java | 139 ++++++++++++++ .../replication/CheckpointInfoResponse.java | 79 ++++++++ .../replication/GetSegmentFilesResponse.java | 40 +++++ .../replication/SegmentReplicationSource.java | 50 ++++++ .../SegmentReplicationSourceFactory.java | 41 +++++ .../replication/SegmentReplicationState.java | 84 +++++++++ .../replication/SegmentReplicationTarget.java | 115 ++++++++++++ .../SegmentReplicationTargetService.java | 170 ++++++++++++++++++ .../common/ReplicationCollection.java | 2 +- .../replication/common/ReplicationState.java | 2 + .../replication/common/ReplicationTarget.java | 96 +++++++++- .../PeerRecoveryTargetServiceTests.java | 8 +- .../SegmentReplicationTargetServiceTests.java | 127 +++++++++++++ 21 files changed, 1017 insertions(+), 207 deletions(-) rename server/src/main/java/org/opensearch/indices/recovery/{RecoveryFileChunkRequest.java => FileChunkRequest.java} (95%) create mode 100644 server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 3a5e21fc8ef65..ee2067c591cef 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -77,7 +77,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFileChunkRequest; +import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.plugins.Plugin; import org.opensearch.snapshots.SnapshotState; @@ -397,7 +397,7 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionExc internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = 
(RecoveryFileChunkRequest) request; + FileChunkRequest req = (FileChunkRequest) request; byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes; int i = randomIntBetween(0, req.content().length() - 1); array[i] = (byte) ~array[i]; // flip one byte in the content @@ -474,11 +474,11 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, Interrupte internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + FileChunkRequest req = (FileChunkRequest) request; if (truncate && req.length() > 1) { BytesRef bytesRef = req.content().toBytesRef(); BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1); - request = new RecoveryFileChunkRequest( + request = new FileChunkRequest( req.recoveryId(), req.requestSeqNo(), req.shardId(), diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index 06475f1e7ac9d..1f16cc0363686 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -67,7 +67,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFileChunkRequest; +import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.plugins.Plugin; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -809,7 +809,7 @@ public void sendRequest( TransportRequestOptions options ) throws IOException { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request; + FileChunkRequest chunkRequest = (FileChunkRequest) request; if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) { // corrupting the segments_N files in order to make sure future recovery re-send files logger.debug("corrupting [{}] to {}. 
file name: [{}]", action, connection.getNode(), chunkRequest.name()); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java index 1708454faf7b3..b5d7bd476059d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java @@ -43,7 +43,7 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFileChunkRequest; +import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; @@ -146,7 +146,7 @@ public void testCancelRecoveryAndResume() throws Exception { internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + FileChunkRequest req = (FileChunkRequest) request; logger.info("file chunk [{}] lastChunk: {}", req, req.lastChunk()); if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { latch.countDown(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFileChunkRequest.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java similarity index 95% rename from server/src/main/java/org/opensearch/indices/recovery/RecoveryFileChunkRequest.java rename to server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java index 886de8d56645c..3594495224481 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java @@ -43,11 +43,11 @@ import java.io.IOException; /** - * Request for a recovery file chunk + * Request containing a file chunk. 
* * @opensearch.internal */ -public final class RecoveryFileChunkRequest extends RecoveryTransportRequest { +public final class FileChunkRequest extends RecoveryTransportRequest { private final boolean lastChunk; private final long recoveryId; private final ShardId shardId; @@ -58,7 +58,7 @@ public final class RecoveryFileChunkRequest extends RecoveryTransportRequest { private final int totalTranslogOps; - public RecoveryFileChunkRequest(StreamInput in) throws IOException { + public FileChunkRequest(StreamInput in) throws IOException { super(in); recoveryId = in.readLong(); shardId = new ShardId(in); @@ -75,7 +75,7 @@ public RecoveryFileChunkRequest(StreamInput in) throws IOException { sourceThrottleTimeInNanos = in.readLong(); } - public RecoveryFileChunkRequest( + public FileChunkRequest( long recoveryId, final long requestSeqNo, ShardId shardId, diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index e13022afa81ba..85141556657f3 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -36,20 +36,17 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.RateLimiter; -import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; -import org.opensearch.action.support.ChannelActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; @@ -60,7 +57,6 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.mapper.MapperException; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; @@ -71,7 +67,6 @@ import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -148,7 +143,7 @@ public PeerRecoveryTargetService( transportService.registerRequestHandler( Actions.FILE_CHUNK, ThreadPool.Names.GENERIC, - RecoveryFileChunkRequest::new, + FileChunkRequest::new, new FileChunkTransportRequestHandler() ); transportService.registerRequestHandler( @@ -354,12 +349,13 @@ class PrepareForTranslogOperationsRequestHandler implements TransportRequestHand @Override 
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.PREPARE_TRANSLOG, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.PREPARE_TRANSLOG, request); if (listener == null) { return; } - recoveryRef.get().prepareForTranslogOperations(request.totalTranslogOps(), listener); + recoveryTarget.prepareForTranslogOperations(request.totalTranslogOps(), listener); } } } @@ -369,12 +365,13 @@ class FinalizeRecoveryRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FINALIZE, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.FINALIZE, request); if (listener == null) { return; } - recoveryRef.get().finalizeRecovery(request.globalCheckpoint(), request.trimAboveSeqNo(), listener); + recoveryTarget.finalizeRecovery(request.globalCheckpoint(), request.trimAboveSeqNo(), listener); } } } @@ -399,8 +396,7 @@ public void messageReceived(final RecoveryTranslogOperationsRequest request, fin throws IOException { try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); - final ActionListener listener = createOrFinishListener( - recoveryRef, + final ActionListener listener = recoveryTarget.createOrFinishListener( channel, Actions.TRANSLOG_OPS, request, @@ -484,20 +480,20 @@ class FilesInfoRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILES_INFO, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.FILES_INFO, request); if (listener == null) { return; } - recoveryRef.get() - .receiveFileInfo( - request.phase1FileNames, - request.phase1FileSizes, - request.phase1ExistingFileNames, - request.phase1ExistingFileSizes, - request.totalTranslogOps, - listener - ); + recoveryTarget.receiveFileInfo( + request.phase1FileNames, + request.phase1FileSizes, + request.phase1ExistingFileNames, + request.phase1ExistingFileSizes, + request.totalTranslogOps, + listener + ); } } } @@ -507,90 +503,37 @@ class CleanFilesRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.CLEAN_FILES, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.CLEAN_FILES, request); if (listener == null) { return; } - recoveryRef.get() - .cleanFiles(request.totalTranslogOps(), request.getGlobalCheckpoint(), request.sourceMetaSnapshot(), listener); + recoveryTarget.cleanFiles( + request.totalTranslogOps(), + request.getGlobalCheckpoint(), + request.sourceMetaSnapshot(), + listener + ); } 
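// A condensed view of the shape the request handlers above now share. This is
// a sketch, not code from the patch: doWork stands in for the per-action call
// (prepareForTranslogOperations, finalizeRecovery, indexTranslogOperations,
// receiveFileInfo or cleanFiles), and actionName for the matching Actions constant.
//
//     try (ReplicationRef<RecoveryTarget> ref = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) {
//         final RecoveryTarget recoveryTarget = ref.get();
//         final ActionListener<Void> listener = recoveryTarget.createOrFinishListener(channel, actionName, request);
//         if (listener == null) {
//             return; // this requestSeqNo was already handled; nothing left to do
//         }
//         recoveryTarget.doWork(request, listener);
//     }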
} } - class FileChunkTransportRequestHandler implements TransportRequestHandler { + class FileChunkTransportRequestHandler implements TransportRequestHandler { // How many bytes we've copied since we last called RateLimiter.pause final AtomicLong bytesSinceLastPause = new AtomicLong(); @Override - public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel, Task task) throws Exception { + public void messageReceived(final FileChunkRequest request, TransportChannel channel, Task task) throws Exception { try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILE_CHUNK, request); - if (listener == null) { - return; - } - - final ReplicationLuceneIndex indexState = recoveryTarget.state().getIndex(); - if (request.sourceThrottleTimeInNanos() != ReplicationLuceneIndex.UNKNOWN) { - indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); - } - - RateLimiter rateLimiter = recoverySettings.rateLimiter(); - if (rateLimiter != null) { - long bytes = bytesSinceLastPause.addAndGet(request.content().length()); - if (bytes > rateLimiter.getMinPauseCheckBytes()) { - // Time to pause - bytesSinceLastPause.addAndGet(-bytes); - long throttleTimeInNanos = rateLimiter.pause(bytes); - indexState.addTargetThrottling(throttleTimeInNanos); - recoveryTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); - } - } - recoveryTarget.writeFileChunk( - request.metadata(), - request.position(), - request.content(), - request.lastChunk(), - request.totalTranslogOps(), - listener - ); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.FILE_CHUNK, request); + recoveryTarget.handleFileChunk(request, recoveryTarget, bytesSinceLastPause, recoverySettings.rateLimiter(), listener); } } } - private ActionListener createOrFinishListener( - final ReplicationRef recoveryRef, - final TransportChannel channel, - final String action, - final RecoveryTransportRequest request - ) { - return createOrFinishListener(recoveryRef, channel, action, request, nullVal -> TransportResponse.Empty.INSTANCE); - } - - private ActionListener createOrFinishListener( - final ReplicationRef recoveryRef, - final TransportChannel channel, - final String action, - final RecoveryTransportRequest request, - final CheckedFunction responseFn - ) { - final RecoveryTarget recoveryTarget = recoveryRef.get(); - final ActionListener channelListener = new ChannelActionListener<>(channel, action, request); - final ActionListener voidListener = ActionListener.map(channelListener, responseFn); - - final long requestSeqNo = request.requestSeqNo(); - final ActionListener listener; - if (requestSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { - listener = recoveryTarget.markRequestReceivedAndCreateListener(requestSeqNo, voidListener); - } else { - listener = voidListener; - } - - return listener; - } - class RecoveryRunner extends AbstractRunnable { final long recoveryId; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index a3c7adb755145..57208ab029bf4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -260,6 +260,7 @@ public Translog getTranslog() { return 
translog; } + @Override public ReplicationTimer getTimer() { return timer; } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 92897ab19ad64..1735bb015c90c 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; @@ -141,7 +142,7 @@ public String description() { } @Override - public void notifyListener(Exception e, boolean sendShardFailure) { + public void notifyListener(OpenSearchException e, boolean sendShardFailure) { listener.onFailure(state(), new RecoveryFailedException(state(), e.getMessage(), e), sendShardFailure); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index fd6de6322bb0a..ab6466feb11f8 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -35,38 +35,24 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionListenerResponseHandler; -import org.opensearch.action.support.RetryableAction; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.EmptyTransportResponseHandler; -import org.opensearch.transport.RemoteTransportException; -import org.opensearch.transport.SendRequestTransportException; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -80,12 +66,10 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private static final Logger logger = LogManager.getLogger(RemoteRecoveryTargetHandler.class); private final TransportService 
transportService; - private final ThreadPool threadPool; private final long recoveryId; private final ShardId shardId; private final DiscoveryNode targetNode; private final RecoverySettings recoverySettings; - private final Map> onGoingRetryableActions = ConcurrentCollections.newConcurrentMap(); private final TransportRequestOptions translogOpsRequestOptions; private final TransportRequestOptions fileChunkRequestOptions; @@ -94,8 +78,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private final AtomicLong requestSeqNoGenerator = new AtomicLong(0); private final Consumer onSourceThrottle; - private final boolean retriesSupported; - private volatile boolean isCancelled = false; + private final RetryableTransportClient retryableTransportClient; public RemoteRecoveryTargetHandler( long recoveryId, @@ -106,7 +89,15 @@ public RemoteRecoveryTargetHandler( Consumer onSourceThrottle ) { this.transportService = transportService; - this.threadPool = transportService.getThreadPool(); + // It is safe to pass the retry timeout value here because RemoteRecoveryTargetHandler + // created per recovery. Any change to RecoverySettings will be applied on the next + // recovery. + this.retryableTransportClient = new RetryableTransportClient( + transportService, + targetNode, + recoverySettings.internalActionRetryTimeout(), + logger + ); this.recoveryId = recoveryId; this.shardId = shardId; this.targetNode = targetNode; @@ -120,7 +111,6 @@ public RemoteRecoveryTargetHandler( .withType(TransportRequestOptions.Type.RECOVERY) .withTimeout(recoverySettings.internalActionTimeout()) .build(); - this.retriesSupported = targetNode.getVersion().onOrAfter(LegacyESVersion.V_7_9_0); } public DiscoveryNode targetNode() { @@ -137,12 +127,9 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -156,12 +143,9 @@ public void finalizeRecovery(final long globalCheckpoint, final long trimAboveSe globalCheckpoint, trimAboveSeqNo ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionLongTimeout()) - .build(); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -200,7 +184,7 @@ public void indexTranslogOperations( ); final Writeable.Reader reader = RecoveryTranslogOperationsResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r.localCheckpoint); - executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); } @Override @@ -224,12 +208,9 @@ public void receiveFileInfo( phase1ExistingFileSizes, totalTranslogOps ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionTimeout()) - .build(); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; 
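// The pair of lines around this comment is now the entire pattern for each
// outbound action: adapt the caller's listener, then hand the action to
// retryableTransportClient, which owns the timeout, the retry policy, and the
// cancellation bookkeeping this class previously carried (see the
// RetryableTransportClient class introduced later in this patch).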
final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -249,12 +230,9 @@ public void cleanFiles( totalTranslogOps, globalCheckpoint ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionTimeout()) - .build(); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -294,7 +272,7 @@ public void writeFileChunk( * see how many translog ops we accumulate while copying files across the network. A future optimization * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up. */ - final RecoveryFileChunkRequest request = new RecoveryFileChunkRequest( + final FileChunkRequest request = new FileChunkRequest( recoveryId, requestSeqNo, shardId, @@ -306,71 +284,17 @@ public void writeFileChunk( throttleTimeInNanos ); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; - executeRetryableAction(action, request, fileChunkRequestOptions, ActionListener.map(listener, r -> null), reader); + retryableTransportClient.executeRetryableAction( + action, + request, + fileChunkRequestOptions, + ActionListener.map(listener, r -> null), + reader + ); } @Override public void cancel() { - isCancelled = true; - if (onGoingRetryableActions.isEmpty()) { - return; - } - final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("recovery was cancelled"); - // Dispatch to generic as cancellation calls can come on the cluster state applier thread - threadPool.generic().execute(() -> { - for (RetryableAction action : onGoingRetryableActions.values()) { - action.cancel(exception); - } - onGoingRetryableActions.clear(); - }); - } - - private void executeRetryableAction( - String action, - RecoveryTransportRequest request, - TransportRequestOptions options, - ActionListener actionListener, - Writeable.Reader reader - ) { - final Object key = new Object(); - final ActionListener removeListener = ActionListener.runBefore(actionListener, () -> onGoingRetryableActions.remove(key)); - final TimeValue initialDelay = TimeValue.timeValueMillis(200); - final TimeValue timeout = recoverySettings.internalActionRetryTimeout(); - final RetryableAction retryableAction = new RetryableAction(logger, threadPool, initialDelay, timeout, removeListener) { - - @Override - public void tryAction(ActionListener listener) { - transportService.sendRequest( - targetNode, - action, - request, - options, - new ActionListenerResponseHandler<>(listener, reader, ThreadPool.Names.GENERIC) - ); - } - - @Override - public boolean shouldRetry(Exception e) { - return retriesSupported && retryableException(e); - } - }; - onGoingRetryableActions.put(key, retryableAction); - retryableAction.run(); - if (isCancelled) { - retryableAction.cancel(new CancellableThreads.ExecutionCancelledException("recovery was cancelled")); - } - } - - private static boolean retryableException(Exception e) { - if (e instanceof ConnectTransportException) { - return true; - } else if (e instanceof SendRequestTransportException) { - final 
Throwable cause = ExceptionsHelper.unwrapCause(e); - return cause instanceof ConnectTransportException; - } else if (e instanceof RemoteTransportException) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - return cause instanceof CircuitBreakingException || cause instanceof OpenSearchRejectedExecutionException; - } - return false; + retryableTransportClient.cancel(); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java new file mode 100644 index 0000000000000..bc10cc80b7fdc --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.recovery; + +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.LegacyESVersion; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionListenerResponseHandler; +import org.opensearch.action.support.RetryableAction; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.RemoteTransportException; +import org.opensearch.transport.SendRequestTransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestOptions; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportService; + +import java.util.Map; + +/** + * Client that implements retry functionality for transport layer requests. + * + * @opensearch.internal + */ +public final class RetryableTransportClient { + + private final ThreadPool threadPool; + private final Map> onGoingRetryableActions = ConcurrentCollections.newConcurrentMap(); + private volatile boolean isCancelled = false; + private final TransportService transportService; + private final TimeValue retryTimeout; + private final DiscoveryNode targetNode; + + private final Logger logger; + + public RetryableTransportClient(TransportService transportService, DiscoveryNode targetNode, TimeValue retryTimeout, Logger logger) { + this.threadPool = transportService.getThreadPool(); + this.transportService = transportService; + this.retryTimeout = retryTimeout; + this.targetNode = targetNode; + this.logger = logger; + } + + /** + * Execute a retryable action. + * @param action {@link String} Action Name. + * @param request {@link TransportRequest} Transport request to execute. + * @param actionListener {@link ActionListener} Listener to complete + * @param reader {@link Writeable.Reader} Reader to read the response stream. + * @param {@link TransportResponse} type. 
+ */ + public void executeRetryableAction( + String action, + TransportRequest request, + ActionListener actionListener, + Writeable.Reader reader + ) { + final TransportRequestOptions options = TransportRequestOptions.builder().withTimeout(retryTimeout).build(); + executeRetryableAction(action, request, options, actionListener, reader); + } + + void executeRetryableAction( + String action, + TransportRequest request, + TransportRequestOptions options, + ActionListener actionListener, + Writeable.Reader reader + ) { + final Object key = new Object(); + final ActionListener removeListener = ActionListener.runBefore(actionListener, () -> onGoingRetryableActions.remove(key)); + final TimeValue initialDelay = TimeValue.timeValueMillis(200); + final RetryableAction retryableAction = new RetryableAction(logger, threadPool, initialDelay, retryTimeout, removeListener) { + + @Override + public void tryAction(ActionListener listener) { + transportService.sendRequest( + targetNode, + action, + request, + options, + new ActionListenerResponseHandler<>(listener, reader, ThreadPool.Names.GENERIC) + ); + } + + @Override + public boolean shouldRetry(Exception e) { + return targetNode.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) && retryableException(e); + } + }; + onGoingRetryableActions.put(key, retryableAction); + retryableAction.run(); + if (isCancelled) { + retryableAction.cancel(new CancellableThreads.ExecutionCancelledException("retryable action was cancelled")); + } + } + + public void cancel() { + isCancelled = true; + if (onGoingRetryableActions.isEmpty()) { + return; + } + final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); + // Dispatch to generic as cancellation calls can come on the cluster state applier thread + threadPool.generic().execute(() -> { + for (RetryableAction action : onGoingRetryableActions.values()) { + action.cancel(exception); + } + onGoingRetryableActions.clear(); + }); + } + + private static boolean retryableException(Exception e) { + if (e instanceof ConnectTransportException) { + return true; + } else if (e instanceof SendRequestTransportException) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + return cause instanceof ConnectTransportException; + } else if (e instanceof RemoteTransportException) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + return cause instanceof CircuitBreakingException || cause instanceof OpenSearchRejectedExecutionException; + } + return false; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java new file mode 100644 index 0000000000000..a73a3b54184da --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
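A hedged usage sketch of the extracted client, outside of peer recovery; the action name and timeout are illustrative, and transportService, targetNode, request and logger are assumed to be in scope (TimeValue is org.opensearch.common.unit.TimeValue):

    // Illustrative wiring, not code from this patch: retry one action against a
    // single node for up to five minutes.
    RetryableTransportClient client = new RetryableTransportClient(
        transportService,
        targetNode,
        TimeValue.timeValueMinutes(5), // a recovery passes recoverySettings.internalActionRetryTimeout()
        logger
    );
    client.executeRetryableAction(
        "internal:example/action", // hypothetical action name, registered elsewhere
        request,
        ActionListener.wrap(r -> logger.debug("completed"), e -> logger.warn("gave up after retries", e)),
        in -> TransportResponse.Empty.INSTANCE
    );

Per retryableException above, only connection-level failures and remote circuit-breaker or thread-pool rejections are retried; anything else fails immediately, and cancel() drains in-flight retries on the generic thread pool.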
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Set; + +/** + * Response returned from a {@link SegmentReplicationSource} that includes the file metadata, and SegmentInfos + * associated with a particular {@link ReplicationCheckpoint}. The {@link SegmentReplicationSource} may determine that + * the requested {@link ReplicationCheckpoint} is behind and return a different {@link ReplicationCheckpoint} in this response. + * + * @opensearch.internal + */ +public class CheckpointInfoResponse extends TransportResponse { + + private final ReplicationCheckpoint checkpoint; + private final Store.MetadataSnapshot snapshot; + private final byte[] infosBytes; + // pendingDeleteFiles are segments that have been merged away in the latest in memory SegmentInfos + // but are still referenced by the latest commit point (Segments_N). + private final Set pendingDeleteFiles; + + public CheckpointInfoResponse( + final ReplicationCheckpoint checkpoint, + final Store.MetadataSnapshot snapshot, + final byte[] infosBytes, + final Set additionalFiles + ) { + this.checkpoint = checkpoint; + this.snapshot = snapshot; + this.infosBytes = infosBytes; + this.pendingDeleteFiles = additionalFiles; + } + + public CheckpointInfoResponse(StreamInput in) throws IOException { + this.checkpoint = new ReplicationCheckpoint(in); + this.snapshot = new Store.MetadataSnapshot(in); + this.infosBytes = in.readByteArray(); + this.pendingDeleteFiles = in.readSet(StoreFileMetadata::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + checkpoint.writeTo(out); + snapshot.writeTo(out); + out.writeByteArray(infosBytes); + out.writeCollection(pendingDeleteFiles); + } + + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } + + public Store.MetadataSnapshot getSnapshot() { + return snapshot; + } + + public byte[] getInfosBytes() { + return infosBytes; + } + + public Set getPendingDeleteFiles() { + return pendingDeleteFiles; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java new file mode 100644 index 0000000000000..6dc7e293b2c31 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.List; + +/** + * Response from a {@link SegmentReplicationSource} indicating that a replication event has completed. 
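CheckpointInfoResponse follows the standard Writeable convention: the StreamInput constructor must read fields in exactly the order writeTo wrote them. A quick round-trip sketch, assuming a populated response is in scope and using org.opensearch.common.io.stream.BytesStreamOutput:

    // Round trip through an in-memory buffer to sanity-check the wire format.
    BytesStreamOutput out = new BytesStreamOutput();
    response.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        CheckpointInfoResponse copy = new CheckpointInfoResponse(in);
        assert java.util.Arrays.equals(copy.getInfosBytes(), response.getInfosBytes());
    }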
+ * + * @opensearch.internal + */ +public class GetSegmentFilesResponse extends TransportResponse { + + List<StoreFileMetadata> files; + + public GetSegmentFilesResponse(List<StoreFileMetadata> files) { + this.files = files; + } + + public GetSegmentFilesResponse(StreamInput in) throws IOException { + this.files = in.readList(StoreFileMetadata::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(files); + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java new file mode 100644 index 0000000000000..8628a266ea7d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.ActionListener; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; + +import java.util.List; + +/** + * Represents the source of a replication event. + * + * @opensearch.internal + */ +public interface SegmentReplicationSource { + + /** + * Get Metadata for a ReplicationCheckpoint. + * + * @param replicationId {@link long} - ID of the replication event. + * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. + * @param listener {@link ActionListener} listener that completes with a {@link CheckpointInfoResponse}. + */ + void getCheckpointMetadata(long replicationId, ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener); + + /** + * Fetch the requested segment files. Passes a listener that completes when files are stored locally. + * + * @param replicationId {@link long} - ID of the replication event. + * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. + * @param filesToFetch {@link List} List of files to fetch. + * @param store {@link Store} Reference to the local store. + * @param listener {@link ActionListener} Listener that completes with the list of files copied. + */ + void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + Store store, + ActionListener<GetSegmentFilesResponse> listener + ); +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java new file mode 100644 index 0000000000000..3ca31503f176d --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.transport.TransportService; + +/** + * Factory to build {@link SegmentReplicationSource} used by {@link SegmentReplicationTargetService}. 
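A hypothetical in-memory implementation of the interface, the kind of test double the unit tests later in this patch rely on; every name here is illustrative:

    // Answers metadata requests with a canned response and pretends every
    // requested file was fetched successfully.
    class CannedReplicationSource implements SegmentReplicationSource {
        private final CheckpointInfoResponse info;

        CannedReplicationSource(CheckpointInfoResponse info) {
            this.info = info;
        }

        @Override
        public void getCheckpointMetadata(long replicationId, ReplicationCheckpoint checkpoint,
                                          ActionListener<CheckpointInfoResponse> listener) {
            listener.onResponse(info);
        }

        @Override
        public void getSegmentFiles(long replicationId, ReplicationCheckpoint checkpoint,
                                    List<StoreFileMetadata> filesToFetch, Store store,
                                    ActionListener<GetSegmentFilesResponse> listener) {
            listener.onResponse(new GetSegmentFilesResponse(filesToFetch));
        }
    }

The factory below is the seam where a real, primary-backed source is expected to be produced once it exists.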
+ * + * @opensearch.internal + */ +public class SegmentReplicationSourceFactory { + + private TransportService transportService; + private RecoverySettings recoverySettings; + private ClusterService clusterService; + + public SegmentReplicationSourceFactory( + TransportService transportService, + RecoverySettings recoverySettings, + ClusterService clusterService + ) { + this.transportService = transportService; + this.recoverySettings = recoverySettings; + this.clusterService = clusterService; + } + + public SegmentReplicationSource get(IndexShard shard) { + // TODO: Default to an implementation that uses the primary shard. + return null; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java new file mode 100644 index 0000000000000..b01016d2a1e62 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationTimer; + +/** + * ReplicationState implementation to track Segment Replication events. + * + * @opensearch.internal + */ +public class SegmentReplicationState implements ReplicationState { + + /** + * The stage of the recovery state + * + * @opensearch.internal + */ + public enum Stage { + DONE((byte) 0), + + INIT((byte) 1); + + private static final Stage[] STAGES = new Stage[Stage.values().length]; + + static { + for (Stage stage : Stage.values()) { + assert stage.id() < STAGES.length && stage.id() >= 0; + STAGES[stage.id] = stage; + } + } + + private final byte id; + + Stage(byte id) { + this.id = id; + } + + public byte id() { + return id; + } + + public static Stage fromId(byte id) { + if (id < 0 || id >= STAGES.length) { + throw new IllegalArgumentException("No mapping for id [" + id + "]"); + } + return STAGES[id]; + } + } + + public SegmentReplicationState() { + this.stage = Stage.INIT; + } + + private Stage stage; + + @Override + public ReplicationLuceneIndex getIndex() { + // TODO + return null; + } + + @Override + public ReplicationTimer getTimer() { + // TODO + return null; + } + + public Stage getStage() { + return stage; + } + + public void setStage(Stage stage) { + this.stage = stage; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java new file mode 100644 index 0000000000000..7933ea5f0344b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
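The Stage ids are plain bytes, presumably so a stage can later be serialized, and fromId rejects unknown ids. A small sketch of the lifecycle the state machine currently supports:

    // INIT on construction, DONE once the target finishes; an id survives a
    // byte round trip through fromId.
    SegmentReplicationState state = new SegmentReplicationState();
    assert state.getStage() == SegmentReplicationState.Stage.INIT;
    state.setStage(SegmentReplicationState.Stage.DONE);
    assert SegmentReplicationState.Stage.fromId(state.getStage().id()) == SegmentReplicationState.Stage.DONE;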
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationTarget; + +import java.io.IOException; + +/** + * Represents the target of a replication event. + * + * @opensearch.internal + */ +public class SegmentReplicationTarget extends ReplicationTarget { + + private final ReplicationCheckpoint checkpoint; + private final SegmentReplicationSource source; + private final SegmentReplicationState state; + + public SegmentReplicationTarget( + ReplicationCheckpoint checkpoint, + IndexShard indexShard, + SegmentReplicationSource source, + SegmentReplicationTargetService.SegmentReplicationListener listener + ) { + super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); + this.checkpoint = checkpoint; + this.source = source; + this.state = new SegmentReplicationState(); + } + + @Override + protected void closeInternal() { + // TODO + } + + @Override + protected String getPrefix() { + // TODO + return null; + } + + @Override + protected void onDone() { + this.state.setStage(SegmentReplicationState.Stage.DONE); + } + + @Override + protected void onCancel(String reason) { + // TODO + } + + @Override + public ReplicationState state() { + return state; + } + + @Override + public ReplicationTarget retryCopy() { + // TODO + return null; + } + + @Override + public String description() { + // TODO + return null; + } + + @Override + public void notifyListener(OpenSearchException e, boolean sendShardFailure) { + listener.onFailure(state(), e, sendShardFailure); + } + + @Override + public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOException { + // TODO + return false; + } + + @Override + public void writeFileChunk( + StoreFileMetadata metadata, + long position, + BytesReference content, + boolean lastChunk, + int totalTranslogOps, + ActionListener listener + ) { + // TODO + } + + /** + * Start the Replication event. + * @param listener {@link ActionListener} listener. + */ + public void startReplication(ActionListener listener) { + // TODO + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java new file mode 100644 index 0000000000000..1c6053a72a4c5 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.transport.TransportService; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Service class that orchestrates replication events on replicas. + * + * @opensearch.internal + */ +public final class SegmentReplicationTargetService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(SegmentReplicationTargetService.class); + + private final ThreadPool threadPool; + private final RecoverySettings recoverySettings; + + private final ReplicationCollection onGoingReplications; + + private final SegmentReplicationSourceFactory sourceFactory; + + /** + * The internal actions + * + * @opensearch.internal + */ + public static class Actions { + public static final String FILE_CHUNK = "internal:index/shard/replication/file_chunk"; + } + + public SegmentReplicationTargetService( + final ThreadPool threadPool, + final RecoverySettings recoverySettings, + final TransportService transportService, + final SegmentReplicationSourceFactory sourceFactory + ) { + this.threadPool = threadPool; + this.recoverySettings = recoverySettings; + this.onGoingReplications = new ReplicationCollection<>(logger, threadPool); + this.sourceFactory = sourceFactory; + + transportService.registerRequestHandler( + Actions.FILE_CHUNK, + ThreadPool.Names.GENERIC, + FileChunkRequest::new, + new FileChunkTransportRequestHandler() + ); + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { + if (indexShard != null) { + onGoingReplications.cancelForShard(shardId, "shard closed"); + } + } + + public void startReplication( + final ReplicationCheckpoint checkpoint, + final IndexShard indexShard, + final SegmentReplicationListener listener + ) { + startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); + } + + public void startReplication(final SegmentReplicationTarget target) { + final long replicationId = onGoingReplications.start(target, recoverySettings.activityTimeout()); + logger.trace(() -> new ParameterizedMessage("Starting replication {}", replicationId)); + threadPool.generic().execute(new ReplicationRunner(replicationId)); + } + + /** + * Listener that runs on changes in Replication state + * + * @opensearch.internal + */ + public 
interface SegmentReplicationListener extends ReplicationListener { + + @Override + default void onDone(ReplicationState state) { + onReplicationDone((SegmentReplicationState) state); + } + + @Override + default void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + onReplicationFailure((SegmentReplicationState) state, e, sendShardFailure); + } + + void onReplicationDone(SegmentReplicationState state); + + void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure); + } + + /** + * Runnable implementation to trigger a replication event. + */ + private class ReplicationRunner implements Runnable { + + final long replicationId; + + public ReplicationRunner(long replicationId) { + this.replicationId = replicationId; + } + + @Override + public void run() { + start(replicationId); + } + } + + private void start(final long replicationId) { + try (ReplicationRef replicationRef = onGoingReplications.get(replicationId)) { + replicationRef.get().startReplication(new ActionListener<>() { + @Override + public void onResponse(Void o) { + onGoingReplications.markAsDone(replicationId); + } + + @Override + public void onFailure(Exception e) { + onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + } + }); + } + } + + private class FileChunkTransportRequestHandler implements TransportRequestHandler { + + // How many bytes we've copied since we last called RateLimiter.pause + final AtomicLong bytesSinceLastPause = new AtomicLong(); + + @Override + public void messageReceived(final FileChunkRequest request, TransportChannel channel, Task task) throws Exception { + try (ReplicationRef ref = onGoingReplications.getSafe(request.recoveryId(), request.shardId())) { + final SegmentReplicationTarget target = ref.get(); + final ActionListener listener = target.createOrFinishListener(channel, Actions.FILE_CHUNK, request); + target.handleFileChunk(request, target, bytesSinceLastPause, recoverySettings.rateLimiter(), listener); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java index 609825eb5227b..b8295f0685a7f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -133,7 +133,7 @@ public T reset(final long id, final TimeValue activityTimeout) { } catch (Exception e) { // fail shard to be safe assert oldTarget != null; - oldTarget.notifyListener(e, true); + oldTarget.notifyListener(new OpenSearchException("Unable to reset target", e), true); return null; } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java index 7942fa8938dd0..029fcb6a3b690 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java @@ -14,5 +14,7 @@ * @opensearch.internal */ public interface ReplicationState { + ReplicationLuceneIndex getIndex(); + ReplicationTimer getTimer(); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java 
b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 0192270907fd2..f8dc07f122c02 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -9,14 +9,25 @@ package org.opensearch.indices.replication.common; import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.RateLimiter; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.Nullable; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryTransportRequest; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; @@ -64,7 +75,7 @@ public CancellableThreads cancellableThreads() { return cancellableThreads; } - public abstract void notifyListener(Exception e, boolean sendShardFailure); + public abstract void notifyListener(OpenSearchException e, boolean sendShardFailure); public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIndex stateIndex, ReplicationListener listener) { super(name); @@ -98,6 +109,7 @@ public void setLastAccessTime() { lastAccessTime = System.nanoTime(); } + @Nullable public ActionListener markRequestReceivedAndCreateListener(long requestSeqNo, ActionListener listener) { return requestTracker.markReceivedAndCreateListener(requestSeqNo, listener); } @@ -172,4 +184,86 @@ protected void ensureRefCount() { } } + @Nullable + public ActionListener createOrFinishListener( + final TransportChannel channel, + final String action, + final RecoveryTransportRequest request + ) { + return createOrFinishListener(channel, action, request, nullVal -> TransportResponse.Empty.INSTANCE); + } + + @Nullable + public ActionListener createOrFinishListener( + final TransportChannel channel, + final String action, + final RecoveryTransportRequest request, + final CheckedFunction responseFn + ) { + final ActionListener channelListener = new ChannelActionListener<>(channel, action, request); + final ActionListener voidListener = ActionListener.map(channelListener, responseFn); + + final long requestSeqNo = request.requestSeqNo(); + final ActionListener listener; + if (requestSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + listener = markRequestReceivedAndCreateListener(requestSeqNo, voidListener); + } else { + listener = voidListener; + } + + return listener; + } + + /** + * Handle a FileChunkRequest for a {@link ReplicationTarget}. + * + * @param request {@link FileChunkRequest} Request containing the file chunk. + * @param bytesSinceLastPause {@link AtomicLong} Bytes since the last pause. + * @param rateLimiter {@link RateLimiter} Rate limiter. + * @param listener {@link ActionListener} listener that completes when the chunk has been written. 
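The throttling in handleFileChunk is the accumulate-then-pause idiom: chunk sizes are tallied until they cross the limiter's minimum check size, and only then is the caller paused. A compilable standalone sketch using Lucene's RateLimiter.SimpleRateLimiter; the 40 MB/sec figure is illustrative:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.lucene.store.RateLimiter;

    final class ThrottleSketch {
        private static final RateLimiter LIMITER = new RateLimiter.SimpleRateLimiter(40.0); // MB/sec, illustrative
        private static final AtomicLong BYTES_SINCE_LAST_PAUSE = new AtomicLong();

        // Returns nanos spent paused; zero while under the pause-check threshold.
        static long maybePause(long chunkBytes) throws IOException {
            long bytes = BYTES_SINCE_LAST_PAUSE.addAndGet(chunkBytes);
            if (bytes > LIMITER.getMinPauseCheckBytes()) {
                BYTES_SINCE_LAST_PAUSE.addAndGet(-bytes); // reset the tally before pausing
                return LIMITER.pause(bytes);
            }
            return 0L;
        }
    }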
+ * @throws IOException When there is an issue pausing the rate limiter. + */ + public void handleFileChunk( + final FileChunkRequest request, + final ReplicationTarget replicationTarget, + final AtomicLong bytesSinceLastPause, + final RateLimiter rateLimiter, + final ActionListener listener + ) throws IOException { + + if (listener == null) { + return; + } + final ReplicationLuceneIndex indexState = replicationTarget.state().getIndex(); + if (request.sourceThrottleTimeInNanos() != ReplicationLuceneIndex.UNKNOWN) { + indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); + } + if (rateLimiter != null) { + long bytes = bytesSinceLastPause.addAndGet(request.content().length()); + if (bytes > rateLimiter.getMinPauseCheckBytes()) { + // Time to pause + bytesSinceLastPause.addAndGet(-bytes); + long throttleTimeInNanos = rateLimiter.pause(bytes); + indexState.addTargetThrottling(throttleTimeInNanos); + replicationTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); + } + } + writeFileChunk( + request.metadata(), + request.position(), + request.content(), + request.lastChunk(), + request.totalTranslogOps(), + listener + ); + } + + public abstract void writeFileChunk( + StoreFileMetadata metadata, + long position, + BytesReference content, + boolean lastChunk, + int totalTranslogOps, + ActionListener listener + ); } diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index e54f06937cad3..bda2a910d922e 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -105,7 +105,7 @@ public void testWriteFileChunksConcurrently() throws Exception { receiveFileInfoFuture ); receiveFileInfoFuture.actionGet(); - List requests = new ArrayList<>(); + List requests = new ArrayList<>(); long seqNo = 0; for (StoreFileMetadata md : mdFiles) { try (IndexInput in = sourceShard.store().directory().openInput(md.name(), IOContext.READONCE)) { @@ -115,7 +115,7 @@ public void testWriteFileChunksConcurrently() throws Exception { byte[] buffer = new byte[length]; in.readBytes(buffer, 0, length); requests.add( - new RecoveryFileChunkRequest( + new FileChunkRequest( 0, seqNo++, sourceShard.shardId(), @@ -132,7 +132,7 @@ public void testWriteFileChunksConcurrently() throws Exception { } } Randomness.shuffle(requests); - BlockingQueue queue = new ArrayBlockingQueue<>(requests.size()); + BlockingQueue queue = new ArrayBlockingQueue<>(requests.size()); queue.addAll(requests); Thread[] senders = new Thread[between(1, 4)]; CyclicBarrier barrier = new CyclicBarrier(senders.length); @@ -140,7 +140,7 @@ public void testWriteFileChunksConcurrently() throws Exception { senders[i] = new Thread(() -> { try { barrier.await(); - RecoveryFileChunkRequest r; + FileChunkRequest r; while ((r = queue.poll()) != null) { recoveryTarget.writeFileChunk( r.metadata(), diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java new file mode 100644 index 0000000000000..aa17dec5767da --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch 
Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.junit.Assert; +import org.mockito.Mockito; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { + + private IndexShard indexShard; + private ReplicationCheckpoint checkpoint; + private SegmentReplicationSource replicationSource; + private SegmentReplicationTargetService sut; + + @Override + public void setUp() throws Exception { + super.setUp(); + final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); + final TransportService transportService = mock(TransportService.class); + indexShard = newShard(false, settings); + checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 0L, 0L, 0L, 0L); + SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); + replicationSource = mock(SegmentReplicationSource.class); + when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); + + sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); + } + + @Override + public void tearDown() throws Exception { + closeShards(indexShard); + super.tearDown(); + } + + public void testTargetReturnsSuccess_listenerCompletes() throws IOException { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + replicationSource, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + Assert.fail(); + } + } + ); + final SegmentReplicationTarget spy = Mockito.spy(target); + doAnswer(invocation -> { + final ActionListener listener = invocation.getArgument(0); + listener.onResponse(null); + return null; + }).when(spy).startReplication(any()); + sut.startReplication(spy); + closeShards(indexShard); + } + + public void testTargetThrowsException() throws IOException { + final OpenSearchException expectedError = new OpenSearchException("Fail"); + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + replicationSource, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail(); + } + + @Override + public void 
onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) {
+                    assertEquals(SegmentReplicationState.Stage.INIT, state.getStage());
+                    assertEquals(expectedError, e.getCause());
+                    assertTrue(sendShardFailure);
+                }
+            }
+        );
+        final SegmentReplicationTarget spy = Mockito.spy(target);
+        doAnswer(invocation -> {
+            final ActionListener<Void> listener = invocation.getArgument(0);
+            listener.onFailure(expectedError);
+            return null;
+        }).when(spy).startReplication(any());
+        sut.startReplication(spy);
+        closeShards(indexShard);
+    }
+
+    public void testBeforeIndexShardClosed_CancelsOngoingReplications() throws IOException {
+        final SegmentReplicationTarget target = new SegmentReplicationTarget(
+            checkpoint,
+            indexShard,
+            replicationSource,
+            mock(SegmentReplicationTargetService.SegmentReplicationListener.class)
+        );
+        final SegmentReplicationTarget spy = Mockito.spy(target);
+        sut.startReplication(spy);
+        sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY);
+        Mockito.verify(spy, times(1)).cancel(any());
+        closeShards(indexShard);
+    }
+}
From d0a16d382a40fac074896bb444aebf49715b03af Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 30 May 2022 11:03:41 -0500
Subject: [PATCH 07/34] Bump azure-core-http-netty from 1.11.9 to 1.12.0 in /plugins/repository-azure (#3474)

Bumps [azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.11.9 to 1.12.0.
- [Release notes](https://github.com/Azure/azure-sdk-for-java/releases)
- [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.11.9...azure-core_1.12.0)

---
updated-dependencies:
- dependency-name: com.azure:azure-core-http-netty
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-core-http-netty-1.11.9.jar.sha1 | 1 - .../licenses/azure-core-http-netty-1.12.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index eb5fc1650a1b4..dd2ad78ebed04 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -46,7 +46,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.27.0' api 'com.azure:azure-storage-common:12.15.0' - api 'com.azure:azure-core-http-netty:1.11.9' + api 'com.azure:azure-core-http-netty:1.12.0' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 deleted file mode 100644 index 936a02dfba4d7..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d1f34b3e60db038f3913007a2706a820383dc26 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..1b5d162c004de --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 @@ -0,0 +1 @@ +e4381e4e2801ee190ae76b61dbd992e94b40272e \ No newline at end of file From 1ebd5096b40afad9fec8abc8e038c515b35feccc Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 30 May 2022 14:25:49 -0400 Subject: [PATCH 08/34] Update to Apache Lucene 9.2 (#3477) Signed-off-by: Andriy Redko --- buildSrc/version.properties | 2 +- .../licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 | 1 + .../lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.2.0.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 | 1 + .../lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.2.0.jar.sha1 | 1 + .../lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 | 1 + server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-core-9.2.0.jar.sha1 | 1 + server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - 
server/licenses/lucene-grouping-9.2.0.jar.sha1 | 1 + .../licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.2.0.jar.sha1 | 1 + server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-join-9.2.0.jar.sha1 | 1 + server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-memory-9.2.0.jar.sha1 | 1 + server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-misc-9.2.0.jar.sha1 | 1 + server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-queries-9.2.0.jar.sha1 | 1 + .../licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.2.0.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.2.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.2.0.jar.sha1 | 1 + server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-suggest-9.2.0.jar.sha1 | 1 + 45 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-core-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.2.0.jar.sha1 delete mode 100644 
server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-join-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.2.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7a8a9531ebda8..625c540737065 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.2.0-snapshot-ba8c3a8 +lucene = 9.2.0 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 17c82a8e9df7d..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe5e4cf94d26bbe1d982808f34fa132bba5565e3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..50ceb1672cc45 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 @@ -0,0 +1 @@ +12e8ba1ca93695819d0251a16584880deac58ae0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 1c9c809722104..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fc73c790c037e817635fcc30ea0891e6acd2fac \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..ae82ce9134db8 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 @@ -0,0 +1 @@ +832f62c39c8c2a77097e2d2d4438bd1642f11f29 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 
b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 0538fc53b8a60..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -058ffd84388f9ffcf0bfdd7f43a6e832836a2927 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..97615d33b942e --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 @@ -0,0 +1 @@ +f3314a95b461d30e048a932f81ff3c5808dd145f \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b06795ab2c8a1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb166f35ba04a7687b3073afb9972f6669ac722e \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..38f65996f2395 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 @@ -0,0 +1 @@ +e4fd55524bf85aa7d1ec86f8680faa7b07d95fb4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index daaa895551c70..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abf9eb24601ec11ce5b61e4753b41444a869b29d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..4a0a4a561bb44 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 @@ -0,0 +1 @@ +85c59dcdd7ac761b7f384475aa687a0ae0afaab2 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 4bd203700bf5e..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b310130fe9e0f31ce4218cda921309b1143f3541 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..e5fb4a89d6fc3 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 @@ -0,0 +1 @@ +8bfdb8ff2824a585be6d91d80a52a6d4d15c35e8 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index ac27d25f7a100..0000000000000 --- 
a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad8783255cdcb6e7ab23a505123716ad979f3484 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..de10c0dfc9ef6 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 @@ -0,0 +1 @@ +a050c43f529572590d8dd5a5bc9f7b64119795b4 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b8abf33514782..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75f8fbb94a303d04c5dc2b25436300a463003dd6 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..8e2b8f32c035a --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 @@ -0,0 +1 @@ +d2a148922ee01da3f653e931cb572d6dfec1ba3b \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 3384d5fc221e2..0000000000000 --- a/server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b2aa0739c95f1f715f407087dbcf96c5c21f4cc7 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..316a74de5f2d8 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 @@ -0,0 +1 @@ +da636dedae3155ef186b5eaa543093da069ddab1 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b0304427bafd7..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4a2f89c03e98e0fc211bba2c090047a007eb442 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..991b99eadd4c3 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 @@ -0,0 +1 @@ +97f362ff458b03850b3e0fb45a6cc2773ddbfbfa \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index d9d21a557db60..0000000000000 --- a/server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06df30c240dfb970002c104d44370ae58b7cb60a \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0.jar.sha1 b/server/licenses/lucene-core-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..7f05fa3210bf3 --- /dev/null +++ b/server/licenses/lucene-core-9.2.0.jar.sha1 @@ -0,0 +1 @@ +da43e5472e43db68b8c74f05e63d900ecedc1631 \ No 
newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 26260af3f5c20..0000000000000 --- a/server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1991e0f0f71c3c99ba726fcfa372f7ba7c75bcf0 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0.jar.sha1 b/server/licenses/lucene-grouping-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..06e446118ebfc --- /dev/null +++ b/server/licenses/lucene-grouping-9.2.0.jar.sha1 @@ -0,0 +1 @@ +b1ea8b82a036cbff93a9c849cbf574c6730a7b13 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 7010bcbd1a3c6..0000000000000 --- a/server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cac793b5cfbccf5c310d51bc78cf97ce3befceac \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..0729c42c4d129 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.2.0.jar.sha1 @@ -0,0 +1 @@ +c447cad35d879bd656f8a0aeb3114c08e25ca1b1 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 2af846e454951..0000000000000 --- a/server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79344146c032fda532def9771de589c4798117e5 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0.jar.sha1 b/server/licenses/lucene-join-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..b401ef5c0d88c --- /dev/null +++ b/server/licenses/lucene-join-9.2.0.jar.sha1 @@ -0,0 +1 @@ +4652557ef1d68b0046f0bb28762ede953f6367ef \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 579b1eaadf13f..0000000000000 --- a/server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d5f1c88786bcdfc50466f963ef07cbd9c6c7827 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0.jar.sha1 b/server/licenses/lucene-memory-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..dd9a95000a6cd --- /dev/null +++ b/server/licenses/lucene-memory-9.2.0.jar.sha1 @@ -0,0 +1 @@ +6c9aa37760c11c033f154170c15c2b1961b7a886 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b5bea36607367..0000000000000 --- a/server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adba90f90cf6815eeb9009c1a42d7c86f916d9da \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0.jar.sha1 b/server/licenses/lucene-misc-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..df82a6bd926c4 --- /dev/null +++ b/server/licenses/lucene-misc-9.2.0.jar.sha1 @@ -0,0 +1 @@ +c51ef9a5894dfb4548bbf80d1a271cfe8e86cbf6 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 
b/server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 970dee25c8a9b..0000000000000 --- a/server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0f38091eee45a118173c9201677ebafa9ed9e89 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0.jar.sha1 b/server/licenses/lucene-queries-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..bdd9109cbd324 --- /dev/null +++ b/server/licenses/lucene-queries-9.2.0.jar.sha1 @@ -0,0 +1 @@ +fcb32402e0cba93454675cb631d59264968b32a4 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index a8372fad8c3b4..0000000000000 --- a/server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05e2ca9fc81e8b73f746c5ec40321d6d90e3bcdd \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.2.0.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..17ff055324cc2 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.2.0.jar.sha1 @@ -0,0 +1 @@ +7bbcadf643c6bed8a15d789c71cd89a8c9dddf31 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 6199acd87d7c3..0000000000000 --- a/server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4ebbf7fd05e2889624b4dd9afb3f7b22aad94f3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..026075cb7165f --- /dev/null +++ b/server/licenses/lucene-sandbox-9.2.0.jar.sha1 @@ -0,0 +1 @@ +aedb9a641278845f81cb004d6bc557eb43f69a57 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 0a6932502bced..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08836d9dee5a2e9e92b538023285de3d620abd4b \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..e01ea9ef7c16f --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 @@ -0,0 +1 @@ +47e15ef3815554c73cff7163c70115ea1f18818f \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index e587e445c7770..0000000000000 --- a/server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d492d0c7b4bb76c3de7cfc1b4fe224ef9e9e7056 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..dd06925902b0b --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 @@ -0,0 +1 @@ +22308d4eaab8bf8a2b16cfc9eff97bfc2fb5a508 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 
deleted file mode 100644 index 5b722bf4274d1..0000000000000 --- a/server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71b5b0cfb5b5809c4a86e947b1f4d9202d6f1b75 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.2.0.jar.sha1 b/server/licenses/lucene-suggest-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..1d53225793a33 --- /dev/null +++ b/server/licenses/lucene-suggest-9.2.0.jar.sha1 @@ -0,0 +1 @@ +608e3851216dc1d8d85f9389c71241f2b395f1ea \ No newline at end of file From 8723014c12959f8bcf0c5350eae229691cd7cae1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 May 2022 21:30:08 -0700 Subject: [PATCH 09/34] Bump protobuf-java from 3.20.1 to 3.21.1 in /plugins/repository-hdfs (#3472) Signed-off-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 41c38b0b4e558..15980abcb16e3 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.20.1' + api 'com.google.protobuf:protobuf-java:3.21.1' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 deleted file mode 100644 index 1ebc9838b7bea..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5472700cd39a46060efbd35e29cb36b3fb89517b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 new file mode 100644 index 0000000000000..2336816611bfe --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 @@ -0,0 +1 @@ +2e396173a5b6ab549d790eba21c1d125bfe92912 \ No newline at end of file From 1ceff286a0ead3164c99b66332d4e008218bb338 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 31 May 2022 10:57:10 -0500 Subject: [PATCH 10/34] [Upgrade] Lucene-9.3.0-snapshot-823df23 (#3478) Upgrades to latest snapshot of lucene 9.3.0. 
Signed-off-by: Nicholas Walter Knize --- buildSrc/version.properties | 2 +- .../lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 | 1 - .../licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 | 1 - .../lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 | 1 - .../lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-stempel-9.2.0.jar.sha1 | 1 - .../lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-analysis-common-9.2.0.jar.sha1 | 1 - .../lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 | 1 - .../lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-core-9.2.0.jar.sha1 | 1 - server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-grouping-9.2.0.jar.sha1 | 1 - server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-highlighter-9.2.0.jar.sha1 | 1 - .../licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-join-9.2.0.jar.sha1 | 1 - server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-memory-9.2.0.jar.sha1 | 1 - server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-misc-9.2.0.jar.sha1 | 1 - server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-queries-9.2.0.jar.sha1 | 1 - server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-queryparser-9.2.0.jar.sha1 | 1 - .../licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.2.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 | 1 - .../lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-spatial3d-9.2.0.jar.sha1 | 1 - .../licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-suggest-9.2.0.jar.sha1 | 1 - server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/src/main/java/org/opensearch/Version.java | 2 +- 46 files changed, 24 insertions(+), 24 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 
plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 625c540737065..fe2cfe6a63ee6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.2.0 +lucene = 9.3.0-snapshot-823df23 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 
b/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 deleted file mode 100644 index 50ceb1672cc45..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -12e8ba1ca93695819d0251a16584880deac58ae0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..540a48bf7415f --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +610ec9bb8001a2d2ea88e3384eb516017504139e \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 deleted file mode 100644 index ae82ce9134db8..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -832f62c39c8c2a77097e2d2d4438bd1642f11f29 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..7bc128d4562fa --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +43f2ea45a2d12b4c75c7ac11b85ec736c73bc07f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 deleted file mode 100644 index 97615d33b942e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3314a95b461d30e048a932f81ff3c5808dd145f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..bad2a0bdcfa2a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +fb46807684a5b0e28a02b2a1ea3d528e4c25aa05 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 deleted file mode 100644 index 38f65996f2395..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4fd55524bf85aa7d1ec86f8680faa7b07d95fb4 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..b2c62bcbbade1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +be94b15085b6390ed64a8e8a4f5afbcb2d4d5181 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 deleted file mode 100644 index 4a0a4a561bb44..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-85c59dcdd7ac761b7f384475aa687a0ae0afaab2 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..c7f8fd797c589 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +3a6f705a7df2007f5583215420da0725f844ac4f \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 deleted file mode 100644 index e5fb4a89d6fc3..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bfdb8ff2824a585be6d91d80a52a6d4d15c35e8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..28424c2dd1c7a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +ea9931a34288fa6cbd894e244a101e86926ebfb8 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 deleted file mode 100644 index de10c0dfc9ef6..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a050c43f529572590d8dd5a5bc9f7b64119795b4 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..d7c4b20a29db2 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +c339ce0a3b02d92a804081f5ff44b99f7a468caf \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 deleted file mode 100644 index 8e2b8f32c035a..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2a148922ee01da3f653e931cb572d6dfec1ba3b \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..b4a9090408165 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +a8faa5faa38ab8f545e12cf3dd914e934a2f2bfe \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 deleted file mode 100644 index 316a74de5f2d8..0000000000000 --- a/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da636dedae3155ef186b5eaa543093da069ddab1 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 
b/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..ab4abfd7d6a49 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +8dbb5828e79780989a8758b7cbb5a1aacac0004f \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 deleted file mode 100644 index 991b99eadd4c3..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97f362ff458b03850b3e0fb45a6cc2773ddbfbfa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..8ff6a25c9547e --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +68ebd183f1e9edde9f2f37c60f784e4f03555eec \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0.jar.sha1 b/server/licenses/lucene-core-9.2.0.jar.sha1 deleted file mode 100644 index 7f05fa3210bf3..0000000000000 --- a/server/licenses/lucene-core-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da43e5472e43db68b8c74f05e63d900ecedc1631 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..2ec15eb0012c5 --- /dev/null +++ b/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +ea3cb640597d93168765174207542c6765c1fe15 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0.jar.sha1 b/server/licenses/lucene-grouping-9.2.0.jar.sha1 deleted file mode 100644 index 06e446118ebfc..0000000000000 --- a/server/licenses/lucene-grouping-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1ea8b82a036cbff93a9c849cbf574c6730a7b13 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..7b6c561ddeedf --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +ab2bcdbade5976e127c7e9393bf7a7e25a957d9a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0.jar.sha1 deleted file mode 100644 index 0729c42c4d129..0000000000000 --- a/server/licenses/lucene-highlighter-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c447cad35d879bd656f8a0aeb3114c08e25ca1b1 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..b2aa53fcdfb83 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +31ce6ff9188dea49dc4b4d082b498332cc7b86e7 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0.jar.sha1 b/server/licenses/lucene-join-9.2.0.jar.sha1 deleted file mode 100644 index b401ef5c0d88c..0000000000000 --- a/server/licenses/lucene-join-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4652557ef1d68b0046f0bb28762ede953f6367ef \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 
index 0000000000000..7918597d46763 --- /dev/null +++ b/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +c387884f0bc00fb1c064754a69e1e81dff12c755 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0.jar.sha1 b/server/licenses/lucene-memory-9.2.0.jar.sha1 deleted file mode 100644 index dd9a95000a6cd..0000000000000 --- a/server/licenses/lucene-memory-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c9aa37760c11c033f154170c15c2b1961b7a886 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..a87d3de9e2310 --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +e278a2cfe1500b76da770aa29ecd487fea5f8dc3 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0.jar.sha1 b/server/licenses/lucene-misc-9.2.0.jar.sha1 deleted file mode 100644 index df82a6bd926c4..0000000000000 --- a/server/licenses/lucene-misc-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c51ef9a5894dfb4548bbf80d1a271cfe8e86cbf6 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..18a165097d2be --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +77933cdffbcd0f56888a50fd1d9fb39cf6148f1a \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0.jar.sha1 b/server/licenses/lucene-queries-9.2.0.jar.sha1 deleted file mode 100644 index bdd9109cbd324..0000000000000 --- a/server/licenses/lucene-queries-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fcb32402e0cba93454675cb631d59264968b32a4 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..4d148f3a840c8 --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +8d521efa3a111e2feab1a7f07a0cc944bbdcddf4 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.2.0.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0.jar.sha1 deleted file mode 100644 index 17ff055324cc2..0000000000000 --- a/server/licenses/lucene-queryparser-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7bbcadf643c6bed8a15d789c71cd89a8c9dddf31 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..c6e913767696a --- /dev/null +++ b/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +30d6f8f757a007248804ed5db624a125ada24154 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0.jar.sha1 deleted file mode 100644 index 026075cb7165f..0000000000000 --- a/server/licenses/lucene-sandbox-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aedb9a641278845f81cb004d6bc557eb43f69a57 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..22b7769ee3b4d --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ 
+8dd68761fade2dc4d2ea0d9d476a5172cfd22cd2
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1
deleted file mode 100644
index e01ea9ef7c16f..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-47e15ef3815554c73cff7163c70115ea1f18818f
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1
new file mode 100644
index 0000000000000..22d9211a3b623
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1
@@ -0,0 +1 @@
+044ac03b461aaae4568f64948f783e87dae85a8b
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0.jar.sha1
deleted file mode 100644
index dd06925902b0b..0000000000000
--- a/server/licenses/lucene-spatial3d-9.2.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-22308d4eaab8bf8a2b16cfc9eff97bfc2fb5a508
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1
new file mode 100644
index 0000000000000..66998393ed970
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1
@@ -0,0 +1 @@
+53a02ec5b0eabe7fdf97fea1b19eeca5a6cf1122
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.2.0.jar.sha1 b/server/licenses/lucene-suggest-9.2.0.jar.sha1
deleted file mode 100644
index 1d53225793a33..0000000000000
--- a/server/licenses/lucene-suggest-9.2.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-608e3851216dc1d8d85f9389c71241f2b395f1ea
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1
new file mode 100644
index 0000000000000..e5aca63b21732
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1
@@ -0,0 +1 @@
+a57b91ee1c6f3f666dcac697ce6a7de9bd5abba7
\ No newline at end of file
diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java
index e309af54eac6e..a69c1f3c3bcb1 100644
--- a/server/src/main/java/org/opensearch/Version.java
+++ b/server/src/main/java/org/opensearch/Version.java
@@ -89,7 +89,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1);
     public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0);
     public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0);
-    public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_2_0);
+    public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version CURRENT = V_3_0_0;
 
     public static Version readVersion(StreamInput in) throws IOException {
From 2bfe8b31af354e5d00e4ac67d544ce664f083934 Mon Sep 17 00:00:00 2001
From: Tianli Feng
Date: Thu, 2 Jun 2022 09:29:32 -0700
Subject: [PATCH 11/34] Filter out invalid URI and HTTP method in the error message of no handler found for a REST request (#3459)

Filter out the invalid URI and HTTP method in the error message, which is shown when no handler is found for a REST request sent by a user,
so that the HTML special characters <>&"' will not be shown in the error message. The error message
is returned as MIME type `application/json`, which can't contain active (script) content, so it's
not a vulnerability. Besides, no browser is going to render the response as HTML with that MIME type.
However, common security scanners will raise a false-positive alarm for HTML tags appearing in the
response without the HTML special characters escaped, so this solution only aims to satisfy those
code security scanners.

Signed-off-by: Tianli Feng
---
 .../java/org/opensearch/rest/RestController.java  | 14 ++++++++++++--
 .../org/opensearch/rest/RestControllerTests.java  | 13 +++++++++++++
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java
index b576f8b83e5a0..78bebcb9a0af1 100644
--- a/server/src/main/java/org/opensearch/rest/RestController.java
+++ b/server/src/main/java/org/opensearch/rest/RestController.java
@@ -56,6 +56,7 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.URI;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -447,7 +448,9 @@ private void handleUnsupportedHttpMethod(
             msg.append("Incorrect HTTP method for uri [").append(uri);
             msg.append("] and method [").append(method).append("]");
         } else {
-            msg.append(exception.getMessage());
+            // Not using the error message directly from 'exception.getMessage()' to avoid unescaped HTML special characters,
+            // in case false-positive cross site scripting vulnerability is detected by common security scanners.
+            msg.append("Unexpected HTTP method");
         }
         if (validMethodSet.isEmpty() == false) {
             msg.append(", allowed: ").append(validMethodSet);
@@ -488,7 +491,14 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel
         try (XContentBuilder builder = channel.newErrorBuilder()) {
             builder.startObject();
             {
-                builder.field("error", "no handler found for uri [" + uri + "] and method [" + method + "]");
+                try {
+                    // Validate input URI to filter out HTML special characters in the error message,
+                    // in case false-positive cross site scripting vulnerability is detected by common security scanners.
+                    uri = new URI(uri).getPath();
+                    builder.field("error", "no handler found for uri [" + uri + "] and method [" + method + "]");
+                } catch (Exception e) {
+                    builder.field("error", "invalid uri has been requested");
+                }
             }
             builder.endObject();
             channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder));
diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java
index 6004613c0ed17..bd4c7c9a4f824 100644
--- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java
+++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java
@@ -553,6 +553,15 @@ public void testFaviconWithWrongHttpMethod() {
         assertThat(channel.getRestResponse().getHeaders().get("Allow"), hasItem(equalTo(RestRequest.Method.GET.toString())));
     }
 
+    public void testHandleBadRequestWithHtmlSpecialCharsInUri() {
+        final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withPath(
+            "/<script>alert('xss');</script>"
+        ).build();
+        final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST);
+        restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext());
+        assertThat(channel.getRestResponse().content().utf8ToString(), containsString("invalid uri has been requested"));
+    }
+
     public void testDispatchUnsupportedHttpMethod() {
         final boolean hasContent = randomBoolean();
         final RestRequest request = RestRequest.request(xContentRegistry(), new HttpRequest() {
@@ -623,6 +632,10 @@ public Exception getInboundException() {
         assertTrue(channel.getSendResponseCalled());
         assertThat(channel.getRestResponse().getHeaders().containsKey("Allow"), equalTo(true));
         assertThat(channel.getRestResponse().getHeaders().get("Allow"), hasItem(equalTo(RestRequest.Method.GET.toString())));
+        assertThat(
+            channel.getRestResponse().content().utf8ToString(),
+            equalTo("{\"error\":\"Unexpected HTTP method, allowed: [GET]\",\"status\":405}")
+        );
     }
 
     private static final class TestHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {
From 596d32a5fa3902a884d19e4d39663ccc7176f4e3 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Thu, 2 Jun 2022 12:56:18 -0400
Subject: [PATCH 12/34] Support use of IRSA for repository-s3 plugin credentials (#3475)

* Support use of IRSA for repository-s3 plugin credentials

Signed-off-by: Andriy Redko

* Address code review comments

Signed-off-by: Andriy Redko

* Address code review comments

Signed-off-by: Andriy Redko
---
 plugins/repository-s3/build.gradle             |   1 +
 .../aws-java-sdk-sts-1.11.749.jar.sha1         |   1 +
 .../repositories/s3/AmazonS3Reference.java     |  26 +++-
 .../s3/AmazonS3WithCredentials.java            |  39 ++++++
 .../repositories/s3/S3ClientSettings.java      |  94 ++++++++++++-
 .../repositories/s3/S3RepositoryPlugin.java    |   5 +-
 .../opensearch/repositories/s3/S3Service.java  | 125 ++++++++++++++++--
 .../s3/AwsS3ServiceImplTests.java              | 105 +++++++++++++++
 .../s3/RepositoryCredentialsTests.java         |   7 +-
 .../s3/S3ClientSettingsTests.java              |  49 ++++++-
 .../repositories/s3/S3ServiceTests.java        |  29 +++-
 11 files changed, 464 insertions(+), 17 deletions(-)
 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1
 create mode 100644 plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java

diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index 33448b0039ce2..54a2593f4c6f4 100644
--- a/plugins/repository-s3/build.gradle
b/plugins/repository-s3/build.gradle @@ -51,6 +51,7 @@ versions << [ dependencies { api "com.amazonaws:aws-java-sdk-s3:${versions.aws}" api "com.amazonaws:aws-java-sdk-core:${versions.aws}" + api "com.amazonaws:aws-java-sdk-sts:${versions.aws}" api "com.amazonaws:jmespath-java:${versions.aws}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" diff --git a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 new file mode 100644 index 0000000000000..29c9a93542058 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 @@ -0,0 +1 @@ +724bd22c0ff41c496469e18f9bea12bdfb2f7540 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java index 62e415705a011..6f14cd850ccf6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java @@ -32,17 +32,39 @@ package org.opensearch.repositories.s3; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; + +import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.RefCountedReleasable; +import java.io.Closeable; +import java.io.IOException; + /** * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference * counting. */ public class AmazonS3Reference extends RefCountedReleasable { - AmazonS3Reference(AmazonS3 client) { - super("AWS_S3_CLIENT", client, client::shutdown); + this(client, null); + } + + AmazonS3Reference(AmazonS3WithCredentials client) { + this(client.client(), client.credentials()); + } + + AmazonS3Reference(AmazonS3 client, @Nullable AWSCredentialsProvider credentials) { + super("AWS_S3_CLIENT", client, () -> { + client.shutdown(); + if (credentials instanceof Closeable) { + try { + ((Closeable) credentials).close(); + } catch (IOException e) { + /* Do nothing here */ + } + } + }); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java new file mode 100644 index 0000000000000..5622be5546cb1 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.s3; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.s3.AmazonS3; + +import org.opensearch.common.Nullable; + +/** + * The holder of the AmazonS3 and AWSCredentialsProvider + */ +final class AmazonS3WithCredentials { + private final AmazonS3 client; + private final AWSCredentialsProvider credentials; + + private AmazonS3WithCredentials(final AmazonS3 client, @Nullable final AWSCredentialsProvider credentials) { + this.client = client; + this.credentials = credentials; + } + + AmazonS3 client() { + return client; + } + + AWSCredentialsProvider credentials() { + return credentials; + } + + static AmazonS3WithCredentials create(final AmazonS3 client, @Nullable final AWSCredentialsProvider credentials) { + return new AmazonS3WithCredentials(client, credentials); + } +} diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index e02c7cae89378..1f9af5314f30d 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -67,6 +67,29 @@ final class S3ClientSettings { /** Placeholder client name for normalizing client settings in the repository settings. */ private static final String PLACEHOLDER_CLIENT = "placeholder"; + // Properties to support using IAM Roles for Service Accounts (IRSA) + + /** The identity token file for connecting to s3. */ + static final Setting.AffixSetting IDENTITY_TOKEN_FILE_SETTING = Setting.affixKeySetting( + PREFIX, + "identity_token_file", + key -> SecureSetting.simpleString(key, Property.NodeScope) + ); + + /** The role ARN (Amazon Resource Name) for connecting to s3. */ + static final Setting.AffixSetting ROLE_ARN_SETTING = Setting.affixKeySetting( + PREFIX, + "role_arn", + key -> SecureSetting.secureString(key, null) + ); + + /** The role session name for connecting to s3. */ + static final Setting.AffixSetting ROLE_SESSION_NAME_SETTING = Setting.affixKeySetting( + PREFIX, + "role_session_name", + key -> SecureSetting.secureString(key, null) + ); + /** The access key (ie login id) for connecting to s3. */ static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting( PREFIX, @@ -189,6 +212,9 @@ final class S3ClientSettings { /** Credentials to authenticate with s3. */ final S3BasicCredentials credentials; + /** Credentials to authenticate with s3 using IAM Roles for Service Accounts (IRSA). */ + final IrsaCredentials irsaCredentials; + /** The s3 endpoint the client should talk to, or empty string to use the default. 
*/ final String endpoint; @@ -221,6 +247,7 @@ final class S3ClientSettings { private S3ClientSettings( S3BasicCredentials credentials, + IrsaCredentials irsaCredentials, String endpoint, Protocol protocol, int readTimeoutMillis, @@ -233,6 +260,7 @@ private S3ClientSettings( ProxySettings proxySettings ) { this.credentials = credentials; + this.irsaCredentials = irsaCredentials; this.endpoint = endpoint; this.protocol = protocol; this.readTimeoutMillis = readTimeoutMillis; @@ -301,6 +329,7 @@ S3ClientSettings refine(Settings repositorySettings) { validateInetAddressFor(newProxyHost); return new S3ClientSettings( newCredentials, + irsaCredentials, newEndpoint, newProtocol, newReadTimeoutMillis, @@ -396,12 +425,27 @@ private static S3BasicCredentials loadCredentials(Settings settings, String clie } } + private static IrsaCredentials loadIrsaCredentials(Settings settings, String clientName) { + String identityTokenFile = getConfigValue(settings, clientName, IDENTITY_TOKEN_FILE_SETTING); + try ( + SecureString roleArn = getConfigValue(settings, clientName, ROLE_ARN_SETTING); + SecureString roleSessionName = getConfigValue(settings, clientName, ROLE_SESSION_NAME_SETTING) + ) { + if (identityTokenFile.length() != 0 || roleArn.length() != 0 || roleSessionName.length() != 0) { + return new IrsaCredentials(identityTokenFile.toString(), roleArn.toString(), roleSessionName.toString()); + } + + return null; + } + } + // pkg private for tests /** Parse settings for a single client. */ static S3ClientSettings getClientSettings(final Settings settings, final String clientName) { final Protocol awsProtocol = getConfigValue(settings, clientName, PROTOCOL_SETTING); return new S3ClientSettings( S3ClientSettings.loadCredentials(settings, clientName), + S3ClientSettings.loadIrsaCredentials(settings, clientName), getConfigValue(settings, clientName, ENDPOINT_SETTING), awsProtocol, Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), @@ -482,7 +526,8 @@ public boolean equals(final Object o) { && proxySettings.equals(that.proxySettings) && Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding) && Objects.equals(region, that.region) - && Objects.equals(signerOverride, that.signerOverride); + && Objects.equals(signerOverride, that.signerOverride) + && Objects.equals(irsaCredentials, that.irsaCredentials); } @Override @@ -512,4 +557,51 @@ private static T getRepoSettingOrDefault(Setting.AffixSetting setting, Se } return defaultValue; } + + /** + * Class to store IAM Roles for Service Accounts (IRSA) credentials + * See: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + */ + static class IrsaCredentials { + private final String identityTokenFile; + private final String roleArn; + private final String roleSessionName; + + IrsaCredentials(String identityTokenFile, String roleArn, String roleSessionName) { + this.identityTokenFile = Strings.isNullOrEmpty(identityTokenFile) ? null : identityTokenFile; + this.roleArn = Strings.isNullOrEmpty(roleArn) ? null : roleArn; + this.roleSessionName = Strings.isNullOrEmpty(roleSessionName) ?
"s3-sdk-java-" + System.currentTimeMillis() : roleSessionName; + } + + public String getIdentityTokenFile() { + return identityTokenFile; + } + + public String getRoleArn() { + return roleArn; + } + + public String getRoleSessionName() { + return roleSessionName; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final IrsaCredentials that = (IrsaCredentials) o; + return Objects.equals(identityTokenFile, that.identityTokenFile) + && Objects.equals(roleArn, that.roleArn) + && Objects.equals(roleSessionName, that.roleSessionName); + } + + @Override + public int hashCode() { + return Objects.hash(identityTokenFile, roleArn, roleSessionName); + } + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 679243b28cfc7..e1ea31dc53d1e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -132,7 +132,10 @@ public List> getSettings() { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING, S3ClientSettings.SIGNER_OVERRIDE, - S3ClientSettings.REGION + S3ClientSettings.REGION, + S3ClientSettings.ROLE_ARN_SETTING, + S3ClientSettings.IDENTITY_TOKEN_FILE_SETTING, + S3ClientSettings.ROLE_SESSION_NAME_SETTING ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 3ce19378ac05c..6919549874445 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -35,8 +35,11 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSSessionCredentialsProvider; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider; +import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.http.SystemPropertyTlsKeyManagersProvider; @@ -45,6 +48,8 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.Constants; +import com.amazonaws.services.securitytoken.AWSSecurityTokenService; +import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; import org.apache.http.conn.ssl.DefaultHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; @@ -52,9 +57,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.s3.S3ClientSettings.IrsaCredentials; import javax.net.ssl.SSLContext; import java.io.Closeable; @@ -67,6 +74,9 @@ import java.security.SecureRandom; 
import java.util.Map; +import static com.amazonaws.SDKGlobalConfiguration.AWS_ROLE_ARN_ENV_VAR; +import static com.amazonaws.SDKGlobalConfiguration.AWS_ROLE_SESSION_NAME_ENV_VAR; +import static com.amazonaws.SDKGlobalConfiguration.AWS_WEB_IDENTITY_ENV_VAR; import static java.util.Collections.emptyMap; class S3Service implements Closeable { @@ -163,9 +173,11 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { } // proxy for testing - AmazonS3 buildClient(final S3ClientSettings clientSettings) { + AmazonS3WithCredentials buildClient(final S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); - builder.withCredentials(buildCredentials(logger, clientSettings)); + + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + builder.withCredentials(credentials); builder.withClientConfiguration(buildConfiguration(clientSettings)); String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; @@ -192,7 +204,8 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { if (clientSettings.disableChunkedEncoding) { builder.disableChunkedEncoding(); } - return SocketAccess.doPrivileged(builder::build); + final AmazonS3 client = SocketAccess.doPrivileged(builder::build); + return AmazonS3WithCredentials.create(client, credentials); } // pkg private for tests @@ -258,24 +271,83 @@ public Socket createSocket(final HttpContext ctx) throws IOException { // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { - final S3BasicCredentials credentials = clientSettings.credentials; - if (credentials == null) { + final S3BasicCredentials basicCredentials = clientSettings.credentials; + final IrsaCredentials irsaCredentials = buildFromEnvironment(clientSettings.irsaCredentials); + + // If IAM Roles for Service Accounts (IRSA) credentials are configured, start with them first + if (irsaCredentials != null) { + logger.debug("Using IRSA credentials"); + + AWSSecurityTokenService securityTokenService = null; + final String region = Strings.hasLength(clientSettings.region) ? clientSettings.region : null; + if (region != null || basicCredentials != null) { + securityTokenService = SocketAccess.doPrivileged( + () -> AWSSecurityTokenServiceClientBuilder.standard() + .withCredentials((basicCredentials != null) ?
new AWSStaticCredentialsProvider(basicCredentials) : null) + .withRegion(region) + .build() + ); + } + + if (irsaCredentials.getIdentityTokenFile() == null) { + return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>( + securityTokenService, + new STSAssumeRoleSessionCredentialsProvider.Builder(irsaCredentials.getRoleArn(), irsaCredentials.getRoleSessionName()) + .withStsClient(securityTokenService) + .build() + ); + } else { + return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>( + securityTokenService, + new STSAssumeRoleWithWebIdentitySessionCredentialsProvider.Builder( + irsaCredentials.getRoleArn(), + irsaCredentials.getRoleSessionName(), + irsaCredentials.getIdentityTokenFile() + ).withStsClient(securityTokenService).build() + ); + } + } else if (basicCredentials != null) { + logger.debug("Using basic key/secret credentials"); + return new AWSStaticCredentialsProvider(basicCredentials); + } else { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); - } else { - logger.debug("Using basic key/secret credentials"); - return new AWSStaticCredentialsProvider(credentials); } } + private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { + if (defaults == null) { + return null; + } + + String webIdentityTokenFile = defaults.getIdentityTokenFile(); + if (webIdentityTokenFile == null) { + webIdentityTokenFile = System.getenv(AWS_WEB_IDENTITY_ENV_VAR); + } + + String roleArn = defaults.getRoleArn(); + if (roleArn == null) { + roleArn = System.getenv(AWS_ROLE_ARN_ENV_VAR); + } + + String roleSessionName = defaults.getRoleSessionName(); + if (roleSessionName == null) { + roleSessionName = System.getenv(AWS_ROLE_SESSION_NAME_ENV_VAR); + } + + return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); + } + private synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); } + // clear previously cached clients, they will be built lazily clientsCache = emptyMap(); derivedClientSettings = emptyMap(); + // shutdown IdleConnectionReaper background thread // it will be restarted on new client usage IdleConnectionReaper.shutdown(); @@ -300,6 +372,43 @@ public void refresh() { } } + static class PrivilegedSTSAssumeRoleSessionCredentialsProvider
<P extends AWSSessionCredentialsProvider & Closeable>
+ implements + AWSCredentialsProvider, + Closeable { + private final P credentials; + private final AWSSecurityTokenService securityTokenService; + + private PrivilegedSTSAssumeRoleSessionCredentialsProvider( + @Nullable final AWSSecurityTokenService securityTokenService, + final P credentials + ) { + this.securityTokenService = securityTokenService; + this.credentials = credentials; + } + + @Override + public AWSCredentials getCredentials() { + return SocketAccess.doPrivileged(credentials::getCredentials); + } + + @Override + public void refresh() { + SocketAccess.doPrivilegedVoid(credentials::refresh); + } + + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedIOException(() -> { + credentials.close(); + if (securityTokenService != null) { + securityTokenService.shutdown(); + } + return null; + }); + }; + } + @Override public void close() { releaseCachedClients(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java index 38d9ebf337731..76bd5d303e5fb 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java @@ -36,11 +36,16 @@ import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.http.IdleConnectionReaper; + +import org.junit.AfterClass; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; +import java.io.Closeable; import java.io.IOException; +import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -51,6 +56,11 @@ import static org.opensearch.repositories.s3.S3ClientSettings.PROXY_TYPE_SETTING; public class AwsS3ServiceImplTests extends OpenSearchTestCase { + @AfterClass + public static void shutdownIdleConnectionReaper() { + // created by default STS client + IdleConnectionReaper.shutdown(); + } public void testAWSCredentialsDefaultToInstanceProviders() { final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); @@ -86,6 +96,101 @@ public void testAWSCredentialsFromKeystore() { assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } + public void testCredentialsAndIrsaWithIdentityTokenFileCredentialsFromKeystore() throws IOException { + final Map plainSettings = new HashMap<>(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".role_arn", clientName + "_role_arn"); + + // Use static AWS credentials for tests + secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key"); + secureSettings.setString("s3.client." + clientName + ".secret_key", clientName + "_aws_secret_key"); + + // Use explicit region setting + plainSettings.put("s3.client." + clientName + ".region", "us-east1"); + plainSettings.put("s3.client." 
+ clientName + ".identity_token_file", clientName + "_identity_token_file"); + } + final Settings settings = Settings.builder().loadFromMap(plainSettings).setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedSTSAssumeRoleSessionCredentialsProvider.class)); + ((Closeable) credentialsProvider).close(); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); + } + + public void testCredentialsAndIrsaCredentialsFromKeystore() throws IOException { + final Map plainSettings = new HashMap<>(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".role_arn", clientName + "_role_arn"); + secureSettings.setString("s3.client." + clientName + ".role_session_name", clientName + "_role_session_name"); + + // Use static AWS credentials for tests + secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key"); + secureSettings.setString("s3.client." + clientName + ".secret_key", clientName + "_aws_secret_key"); + + // Use explicit region setting + plainSettings.put("s3.client." 
+ clientName + ".region", "us-east1"); + } + final Settings settings = Settings.builder().loadFromMap(plainSettings).setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedSTSAssumeRoleSessionCredentialsProvider.class)); + ((Closeable) credentialsProvider).close(); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); + } + + public void testIrsaCredentialsFromKeystore() throws IOException { + final Map plainSettings = new HashMap<>(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".role_arn", clientName + "_role_arn"); + secureSettings.setString("s3.client." + clientName + ".role_session_name", clientName + "_role_session_name"); + } + final Settings settings = Settings.builder().loadFromMap(plainSettings).setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedSTSAssumeRoleSessionCredentialsProvider.class)); + ((Closeable) credentialsProvider).close(); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); + } + public void testSetDefaultCredential() { final MockSecureSettings secureSettings = new MockSecureSettings(); final String awsAccessKey = randomAlphaOfLength(8); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 9c359d67db88b..a30b36cdd659c 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -317,9 +317,10 @@ public static final class ProxyS3Service 
extends S3Service { private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); @Override - AmazonS3 buildClient(final S3ClientSettings clientSettings) { - final AmazonS3 client = super.buildClient(clientSettings); - return new ClientAndCredentials(client, buildCredentials(logger, clientSettings)); + AmazonS3WithCredentials buildClient(final S3ClientSettings clientSettings) { + final AmazonS3WithCredentials client = super.buildClient(clientSettings); + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + return AmazonS3WithCredentials.create(new ClientAndCredentials(client.client(), credentials), credentials); + } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index 462ed5377ff9a..a86ed3af17476 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -45,6 +45,7 @@ import java.util.Locale; import java.util.Map; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.is; @@ -118,6 +119,52 @@ public void testRejectionOfLoneSessionToken() { assertThat(e.getMessage(), is("Missing access key and secret key for s3 client [default]")); } + public void testIrsaCredentialsTypeWithIdentityTokenFile() { + final Map settings = S3ClientSettings.load( + Settings.builder().put("s3.client.default.identity_token_file", "file").build() + ); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getIdentityTokenFile(), is("file")); + assertThat(credentials.getRoleArn(), is(nullValue())); + assertThat(credentials.getRoleSessionName(), startsWith("s3-sdk-java-")); + } + + public void testIrsaCredentialsTypeRoleArn() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getRoleArn(), is("role")); + assertThat(credentials.getRoleSessionName(), startsWith("s3-sdk-java-")); + } + + public void testIrsaCredentialsTypeWithRoleArnAndRoleSessionName() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + secureSettings.setString("s3.client.default.role_session_name", "session"); + final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getRoleArn(), is("role")); + assertThat(credentials.getRoleSessionName(), is("session")); + } + + public void testIrsaCredentialsTypeWithRoleArnAndRoleSessionNameAndIdentityTokenFile() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + 
secureSettings.setString("s3.client.default.role_arn", "role"); + secureSettings.setString("s3.client.default.role_session_name", "session"); + final Map settings = S3ClientSettings.load( + Settings.builder().setSecureSettings(secureSettings).put("s3.client.default.identity_token_file", "file").build() + ); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getIdentityTokenFile(), is("file")); + assertThat(credentials.getRoleArn(), is("role")); + assertThat(credentials.getRoleSessionName(), is("session")); + } + public void testCredentialsTypeWithAccessKeyAndSecretKey() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.access_key", "access_key"); @@ -199,7 +246,7 @@ public void testRegionCanBeSet() { assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").region, is(region)); try (S3Service s3Service = new S3Service()) { - AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")); + AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")).client(); assertThat(other.getSignerRegionOverride(), is(region)); } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java index cb0e76e272b4e..71e42907ab997 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java @@ -32,10 +32,12 @@ package org.opensearch.repositories.s3; import org.opensearch.cluster.metadata.RepositoryMetadata; - +import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; +import java.util.Map; + public class S3ServiceTests extends OpenSearchTestCase { public void testCachedClientsAreReleased() { @@ -56,4 +58,29 @@ public void testCachedClientsAreReleased() { final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); assertNotSame(clientSettings, clientSettingsReloaded); } + + public void testCachedClientsWithCredentialsAreReleased() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + final Map defaults = S3ClientSettings.load( + Settings.builder().setSecureSettings(secureSettings).put("s3.client.default.identity_token_file", "file").build() + ); + final S3Service s3Service = new S3Service(); + s3Service.refreshAndClearCache(defaults); + final Settings settings = Settings.builder().put("endpoint", "http://first").put("region", "us-east-2").build(); + final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); + final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); + final S3ClientSettings clientSettings = s3Service.settings(metadata2); + final S3ClientSettings otherClientSettings = s3Service.settings(metadata2); + assertSame(clientSettings, otherClientSettings); + final AmazonS3Reference reference = s3Service.client(metadata1); + reference.close(); + s3Service.close(); + final AmazonS3Reference referenceReloaded = s3Service.client(metadata1); + assertNotSame(referenceReloaded, reference); + referenceReloaded.close(); + s3Service.close(); 
+ final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); + assertNotSame(clientSettings, clientSettingsReloaded); + } } From 0add9d2e2e8e2a3609885e0bca0f5a6e0bbdc4f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Jun 2022 13:06:50 -0700 Subject: [PATCH 13/34] Bump google-auth-library-oauth2-http from 0.20.0 to 1.7.0 in /plugins/repository-gcs (#3473) * Bump google-auth-library-oauth2-http in /plugins/repository-gcs Bumps google-auth-library-oauth2-http from 0.20.0 to 1.7.0. --- updated-dependencies: - dependency-name: com.google.auth:google-auth-library-oauth2-http dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Use variable to define the version of dependency google-auth-library-java Signed-off-by: Tianli Feng Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Tianli Feng --- plugins/repository-gcs/build.gradle | 8 ++++++-- .../google-auth-library-credentials-0.20.0.jar.sha1 | 1 - .../google-auth-library-credentials-1.7.0.jar.sha1 | 1 + .../google-auth-library-oauth2-http-0.20.0.jar.sha1 | 1 - .../google-auth-library-oauth2-http-1.7.0.jar.sha1 | 1 + 5 files changed, 8 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 72964f9444026..92ddc69c89f47 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -49,6 +49,10 @@ opensearchplugin { classname 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin' } +versions << [ + 'google_auth': '1.7.0' +] + dependencies { api 'com.google.cloud:google-cloud-storage:1.113.1' api 'com.google.cloud:google-cloud-core:2.5.10' @@ -67,8 +71,8 @@ dependencies { api 'com.google.api.grpc:proto-google-common-protos:2.8.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api 'com.google.cloud:google-cloud-core-http:1.93.3' - api 'com.google.auth:google-auth-library-credentials:0.20.0' - api 'com.google.auth:google-auth-library-oauth2-http:0.20.0' + api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" + api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" api 'com.google.oauth-client:google-oauth-client:1.33.1' api 'com.google.api-client:google-api-client:1.34.0' api 'com.google.http-client:google-http-client-appengine:1.41.8' diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 deleted file mode 100644 index 14cc742737eed..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87a91a373e64ba5c3cdf8cc5cf54b189dd1492f8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 new file mode 100644 index 
0000000000000..f2e9a4f7283bf --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 @@ -0,0 +1 @@ +b29af5a9ea94e9e7f86bded11e39f5afda5b17e8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 deleted file mode 100644 index 7911c34780cbe..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f33d4d6c91a68826816606a2208990eea93fcb2a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 new file mode 100644 index 0000000000000..738645d6b8c7b --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 @@ -0,0 +1 @@ +985d183303dbd4b7ceb348056e41e59677f6f74f \ No newline at end of file From b902add3fab7412e39fa520ed14550f6b6ad3f81 Mon Sep 17 00:00:00 2001 From: Kartik Ganesh Date: Fri, 3 Jun 2022 10:53:58 -0700 Subject: [PATCH 14/34] [Segment Replication] Added source-side classes for orchestrating replication events (#3470) This change expands on the existing SegmentReplicationSource interface and its corresponding Factory class by introducing an implementation where the replication source is a primary shard (PrimaryShardReplicationSource). These code paths execute on the target. The primary shard implementation creates the requests to be sent to the source/primary shard. Correspondingly, this change also defines two request classes for the GET_CHECKPOINT_INFO and GET_SEGMENT_FILES requests as well as an abstract superclass. A CopyState class has been introduced that captures point-in-time, file-level details from an IndexShard. This implementation mirrors Lucene's NRT CopyState implementation. Finally, a service class has been introduced for segment replication that runs on the source side (SegmentReplicationSourceService), which handles these two types of incoming requests. This includes private handler classes that house the logic to respond to these requests, with some functionality stubbed for now. The service class also uses a simple map to cache CopyState objects that would be needed by replication targets. Unit tests have been added/updated for all new functionality.
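To make the caching idea concrete, here is a simplified, hypothetical sketch of the source-side cache (the field name, synchronization, and constructor shape are illustrative guesses, not the exact code from this change):

    // One CopyState per checkpoint, shared by all replicas requesting that checkpoint.
    private final Map<ReplicationCheckpoint, CopyState> copyStateMap = new HashMap<>();

    private synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) throws IOException {
        CopyState copyState = copyStateMap.get(checkpoint);
        if (copyState != null) {
            // Reuse the cached entry; ref-counting keeps its segment files alive while they are copied.
            copyState.incRef();
        } else {
            // Capture point-in-time, file-level details from the shard.
            copyState = new CopyState(indexShard);
            copyStateMap.put(copyState.getCheckpoint(), copyState);
        }
        return copyState;
    }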
Signed-off-by: Kartik Ganesh --- .../org/opensearch/index/engine/Engine.java | 21 +++ .../index/engine/InternalEngine.java | 17 ++ .../index/engine/NRTReplicationEngine.java | 1 + .../index/engine/ReadOnlyEngine.java | 7 + .../opensearch/index/shard/IndexShard.java | 18 ++ .../org/opensearch/index/store/Store.java | 97 +++++++---- .../replication/CheckpointInfoRequest.java | 54 ++++++ .../replication/GetSegmentFilesRequest.java | 60 +++++++ .../PrimaryShardReplicationSource.java | 90 ++++++++++ .../SegmentReplicationSourceFactory.java | 17 +- .../SegmentReplicationSourceService.java | 160 +++++++++++++++++ .../indices/replication/common/CopyState.java | 103 +++++++++++ .../SegmentReplicationTransportRequest.java | 49 ++++++ .../index/engine/EngineConfigTests.java | 108 ++++++++++++ .../index/engine/InternalEngineTests.java | 31 ++++ .../index/engine/ReadOnlyEngineTests.java | 3 + .../opensearch/index/store/StoreTests.java | 29 +++- .../PeerRecoveryTargetServiceTests.java | 2 +- .../recovery/RecoverySourceHandlerTests.java | 8 +- .../PrimaryShardReplicationSourceTests.java | 139 +++++++++++++++ .../SegmentReplicationSourceServiceTests.java | 161 ++++++++++++++++++ .../replication/common/CopyStateTests.java | 80 +++++++++ 22 files changed, 1204 insertions(+), 51 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/CopyState.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java create mode 100644 server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index c242d98b4b65c..4829148322b31 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -175,6 +175,21 @@ public final EngineConfig config() { */ protected abstract SegmentInfos getLatestSegmentInfos(); + /** + * In contrast to {@link #getLatestSegmentInfos()}, which returns a {@link SegmentInfos} + * object directly, this method returns a {@link GatedCloseable} reference to the same object. + * This allows the engine to include a clean-up {@link org.opensearch.common.CheckedRunnable} + * which is run when the reference is closed. The default implementation of the clean-up + * procedure is a no-op. + * + * @return {@link GatedCloseable} - A wrapper around a {@link SegmentInfos} instance that + * must be closed for segment files to be deleted. 
+ */ + public GatedCloseable getSegmentInfosSnapshot() { + // default implementation + return new GatedCloseable<>(getLatestSegmentInfos(), () -> {}); + } + public MergeStats getMergeStats() { return new MergeStats(); } @@ -846,6 +861,12 @@ public final CommitStats commitStats() { */ public abstract long getPersistedLocalCheckpoint(); + /** + * @return the latest checkpoint that has been processed but not necessarily persisted. + * Also see {@link #getPersistedLocalCheckpoint()} + */ + public abstract long getProcessedLocalCheckpoint(); + /** * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint */ diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index e60e650372ec4..b63a39ebb1222 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -2305,6 +2305,22 @@ public SegmentInfos getLatestSegmentInfos() { } } + /** + * Fetch the latest {@link SegmentInfos} object via {@link #getLatestSegmentInfos()} + * but also increment the ref-count to ensure that these segment files are retained + * until the reference is closed. On close, the ref-count is decremented. + */ + @Override + public GatedCloseable getSegmentInfosSnapshot() { + final SegmentInfos segmentInfos = getLatestSegmentInfos(); + try { + indexWriter.incRefDeleter(segmentInfos); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } + return new GatedCloseable<>(segmentInfos, () -> indexWriter.decRefDeleter(segmentInfos)); + } + @Override protected final void writerSegmentStats(SegmentsStats stats) { stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed()); @@ -2724,6 +2740,7 @@ public long getLastSyncedGlobalCheckpoint() { return getTranslog().getLastSyncedGlobalCheckpoint(); } + @Override public long getProcessedLocalCheckpoint() { return localCheckpointTracker.getProcessedCheckpoint(); } diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 106643198cc3b..e4f4bbbba8f16 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -248,6 +248,7 @@ public long getPersistedLocalCheckpoint() { return localCheckpointTracker.getPersistedCheckpoint(); } + @Override public long getProcessedLocalCheckpoint() { return localCheckpointTracker.getProcessedCheckpoint(); } diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 23a86d8da5599..6262a9269c01c 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -374,6 +374,13 @@ public long getPersistedLocalCheckpoint() { return seqNoStats.getLocalCheckpoint(); } + @Override + public long getProcessedLocalCheckpoint() { + // the read-only engine does not process checkpoints, so its + // processed checkpoint is identical to its persisted one. 
+ + return getPersistedLocalCheckpoint(); + } + @Override public SeqNoStats getSeqNoStats(long globalCheckpoint) { return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 995a92e94aeb3..5d11c34ca205c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2638,6 +2638,14 @@ public long getLocalCheckpoint() { return getEngine().getPersistedLocalCheckpoint(); } + /** + * Fetch the latest checkpoint that has been processed but not necessarily persisted. + * Also see {@link #getLocalCheckpoint()}. + */ + public long getProcessedLocalCheckpoint() { + return getEngine().getProcessedLocalCheckpoint(); + } + /** * Returns the global checkpoint for the shard. * @@ -4005,4 +4013,14 @@ public void verifyShardBeforeIndexClosing() throws IllegalStateException { RetentionLeaseSyncer getRetentionLeaseSyncer() { return retentionLeaseSyncer; } + + /** + * Fetch the latest SegmentInfos held by the shard's underlying Engine, wrapped + * by a {@link GatedCloseable} to ensure files are not deleted/merged away. + * + * @throws EngineException - When segment infos cannot be safely retrieved + */ + public GatedCloseable getSegmentInfosSnapshot() { + return getEngine().getSegmentInfosSnapshot(); + } } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 65c47f66b7654..f818456c3a2c8 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -274,6 +274,13 @@ public MetadataSnapshot getMetadata(IndexCommit commit) throws IOException { return getMetadata(commit, false); } + /** + * Convenience wrapper around the {@link #getMetadata(IndexCommit)} method for null input. + */ + public MetadataSnapshot getMetadata() throws IOException { + return getMetadata(null, false); + } + /** * Returns a new MetadataSnapshot for the given commit. If the given commit is null * the latest commit point is used. @@ -315,6 +322,16 @@ public MetadataSnapshot getMetadata(IndexCommit commit, boolean lockDirectory) t } } + /** + * Returns a new {@link MetadataSnapshot} for the given {@link SegmentInfos} object. + * In contrast to {@link #getMetadata(IndexCommit)}, this method is useful for scenarios + * where we need to construct a MetadataSnapshot from an in-memory SegmentInfos object that + * may not have an IndexCommit associated with it, such as with segment replication. + */ + public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOException { + return new MetadataSnapshot(segmentInfos, directory, logger); + } + /** * Renames all the given files from the key of the map to the * value of the map. All successfully renamed files are removed from the map in-place.
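To make the intended usage of these new APIs concrete, a hypothetical caller (sketch only; error handling elided) could pin the live segments, snapshot their metadata, and then release the files:

    try (GatedCloseable<SegmentInfos> snapshot = indexShard.getSegmentInfosSnapshot()) {
        // Segment files cannot be deleted or merged away while the wrapper is open.
        Store.MetadataSnapshot metadata = indexShard.store().getMetadata(snapshot.get());
        // ... hand the metadata off to the segment replication machinery ...
    }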
@@ -477,7 +494,7 @@ public static MetadataSnapshot readMetadataSnapshot( Directory dir = new NIOFSDirectory(indexLocation) ) { failIfCorrupted(dir); - return new MetadataSnapshot(null, dir, logger); + return new MetadataSnapshot((IndexCommit) null, dir, logger); } catch (IndexNotFoundException ex) { // that's fine - happens all the time no need to log } catch (FileNotFoundException | NoSuchFileException ex) { @@ -682,7 +699,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } } directory.syncMetaData(); - final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null); + final Store.MetadataSnapshot metadataOrEmpty = getMetadata(); verifyAfterCleanup(sourceMetadata, metadataOrEmpty); } finally { metadataLock.writeLock().unlock(); @@ -822,7 +839,14 @@ public MetadataSnapshot(Map metadata, Map builder = new HashMap<>(); - Map commitUserDataBuilder = new HashMap<>(); try { final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory); - numDocs = Lucene.getNumDocs(segmentCommitInfos); - commitUserDataBuilder.putAll(segmentCommitInfos.getUserData()); - // we don't know which version was used to write so we take the max version. - Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); - for (SegmentCommitInfo info : segmentCommitInfos) { - final Version version = info.info.getVersion(); - if (version == null) { - // version is written since 3.1+: we should have already hit IndexFormatTooOld. - throw new IllegalArgumentException("expected valid version value: " + info.info.toString()); - } - if (version.onOrAfter(maxVersion)) { - maxVersion = version; - } - for (String file : info.files()) { - checksumFromLuceneFile( - directory, - file, - builder, - logger, - version, - SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)) - ); - } - } - if (maxVersion == null) { - maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; - } - final String segmentsFile = segmentCommitInfos.getSegmentsFileName(); - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + return loadMetadata(segmentCommitInfos, directory, logger); } catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we either know the index is corrupted or it's just not there throw ex; @@ -949,6 +942,40 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg } throw ex; } + } + + static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger) throws IOException { + long numDocs = Lucene.getNumDocs(segmentInfos); + Map commitUserDataBuilder = new HashMap<>(); + commitUserDataBuilder.putAll(segmentInfos.getUserData()); + Map builder = new HashMap<>(); + // we don't know which version was used to write so we take the max version. + Version maxVersion = segmentInfos.getMinSegmentLuceneVersion(); + for (SegmentCommitInfo info : segmentInfos) { + final Version version = info.info.getVersion(); + if (version == null) { + // version is written since 3.1+: we should have already hit IndexFormatTooOld. 
+ throw new IllegalArgumentException("expected valid version value: " + info.info.toString()); + } + if (version.onOrAfter(maxVersion)) { + maxVersion = version; + } + for (String file : info.files()) { + checksumFromLuceneFile( + directory, + file, + builder, + logger, + version, + SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)) + ); + } + } + if (maxVersion == null) { + maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; + } + final String segmentsFile = segmentInfos.getSegmentsFileName(); + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs); } diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java new file mode 100644 index 0000000000000..188a4c1e40fa7 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.SegmentReplicationTransportRequest; + +import java.io.IOException; + +/** + * Request object for fetching segment metadata for a {@link ReplicationCheckpoint} from + * a {@link SegmentReplicationSource}. This object is created by the target node and sent + * to the source node. + * + * @opensearch.internal + */ +public class CheckpointInfoRequest extends SegmentReplicationTransportRequest { + + private final ReplicationCheckpoint checkpoint; + + public CheckpointInfoRequest(StreamInput in) throws IOException { + super(in); + checkpoint = new ReplicationCheckpoint(in); + } + + public CheckpointInfoRequest( + long replicationId, + String targetAllocationId, + DiscoveryNode targetNode, + ReplicationCheckpoint checkpoint + ) { + super(replicationId, targetAllocationId, targetNode); + this.checkpoint = checkpoint; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + checkpoint.writeTo(out); + } + + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java new file mode 100644 index 0000000000000..21749d3fe7d8a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.SegmentReplicationTransportRequest; + +import java.io.IOException; +import java.util.List; + +/** + * Request object for fetching a list of segment files metadata from a {@link SegmentReplicationSource}. + * This object is created by the target node and sent to the source node. + * + * @opensearch.internal + */ +public class GetSegmentFilesRequest extends SegmentReplicationTransportRequest { + + private final List filesToFetch; + private final ReplicationCheckpoint checkpoint; + + public GetSegmentFilesRequest(StreamInput in) throws IOException { + super(in); + this.filesToFetch = in.readList(StoreFileMetadata::new); + this.checkpoint = new ReplicationCheckpoint(in); + } + + public GetSegmentFilesRequest( + long replicationId, + String targetAllocationId, + DiscoveryNode targetNode, + List filesToFetch, + ReplicationCheckpoint checkpoint + ) { + super(replicationId, targetAllocationId, targetNode); + this.filesToFetch = filesToFetch; + this.checkpoint = checkpoint; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(filesToFetch); + checkpoint.writeTo(out); + } + + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java new file mode 100644 index 0000000000000..08dc0b97b31d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.recovery.RetryableTransportClient; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportService; + +import java.util.List; + +import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO; +import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES; + +/** + * Implementation of a {@link SegmentReplicationSource} where the source is a primary node. + * This code executes on the target node. 
+ *
+ * @opensearch.internal
+ */
+public class PrimaryShardReplicationSource implements SegmentReplicationSource {
+
+    private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class);
+
+    private final RetryableTransportClient transportClient;
+    private final DiscoveryNode targetNode;
+    private final String targetAllocationId;
+
+    public PrimaryShardReplicationSource(
+        DiscoveryNode targetNode,
+        String targetAllocationId,
+        TransportService transportService,
+        RecoverySettings recoverySettings,
+        DiscoveryNode sourceNode
+    ) {
+        this.targetAllocationId = targetAllocationId;
+        this.transportClient = new RetryableTransportClient(
+            transportService,
+            sourceNode,
+            recoverySettings.internalActionRetryTimeout(),
+            logger
+        );
+        this.targetNode = targetNode;
+    }
+
+    @Override
+    public void getCheckpointMetadata(
+        long replicationId,
+        ReplicationCheckpoint checkpoint,
+        ActionListener<CheckpointInfoResponse> listener
+    ) {
+        final Writeable.Reader<CheckpointInfoResponse> reader = CheckpointInfoResponse::new;
+        final ActionListener<CheckpointInfoResponse> responseListener = ActionListener.map(listener, r -> r);
+        final CheckpointInfoRequest request = new CheckpointInfoRequest(replicationId, targetAllocationId, targetNode, checkpoint);
+        transportClient.executeRetryableAction(GET_CHECKPOINT_INFO, request, responseListener, reader);
+    }
+
+    @Override
+    public void getSegmentFiles(
+        long replicationId,
+        ReplicationCheckpoint checkpoint,
+        List<StoreFileMetadata> filesToFetch,
+        Store store,
+        ActionListener<GetSegmentFilesResponse> listener
+    ) {
+        final Writeable.Reader<GetSegmentFilesResponse> reader = GetSegmentFilesResponse::new;
+        final ActionListener<GetSegmentFilesResponse> responseListener = ActionListener.map(listener, r -> r);
+        final GetSegmentFilesRequest request = new GetSegmentFilesRequest(
+            replicationId,
+            targetAllocationId,
+            targetNode,
+            filesToFetch,
+            checkpoint
+        );
+        transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java
index 3ca31503f176d..afbb80d263805 100644
--- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java
+++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java
@@ -8,8 +8,11 @@
 
 package org.opensearch.indices.replication;
 
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.ShardId;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.transport.TransportService;
 
@@ -35,7 +38,17 @@ public SegmentReplicationSourceFactory(
     }
 
     public SegmentReplicationSource get(IndexShard shard) {
-        // TODO: Default to an implementation that uses the primary shard.
- return null; + return new PrimaryShardReplicationSource( + clusterService.localNode(), + shard.routingEntry().allocationId().getId(), + transportService, + recoverySettings, + getPrimaryNode(shard.shardId()) + ); + } + + private DiscoveryNode getPrimaryNode(ShardId shardId) { + ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard(); + return clusterService.state().nodes().get(primaryShard.currentNodeId()); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java new file mode 100644 index 0000000000000..9f70120dedd6c --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Service class that handles segment replication requests from replica shards. + * Typically, the "source" is a primary shard. This code executes on the source node. 
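+ *
+ * Replica shards reach this service through the two transport actions declared
+ * in {@link Actions}; a rough sketch of the exchange, as exercised by the unit
+ * tests in this change (the handler and id values here are placeholders):
+ * <pre>
+ * transportService.sendRequest(primaryNode, Actions.GET_CHECKPOINT_INFO,
+ *     new CheckpointInfoRequest(replicationId, allocationId, targetNode, checkpoint), handler);
+ * </pre>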
+ *
+ * @opensearch.internal
+ */
+public class SegmentReplicationSourceService {
+
+    private static final Logger logger = LogManager.getLogger(SegmentReplicationSourceService.class);
+
+    /**
+     * Internal actions used by the segment replication source service on the primary shard
+     *
+     * @opensearch.internal
+     */
+    public static class Actions {
+        public static final String GET_CHECKPOINT_INFO = "internal:index/shard/replication/get_checkpoint_info";
+        public static final String GET_SEGMENT_FILES = "internal:index/shard/replication/get_segment_files";
+    }
+
+    private final Map<ReplicationCheckpoint, CopyState> copyStateMap;
+    private final TransportService transportService;
+    private final IndicesService indicesService;
+
+    // TODO mark this as injected and bind in Node
+    public SegmentReplicationSourceService(TransportService transportService, IndicesService indicesService) {
+        copyStateMap = Collections.synchronizedMap(new HashMap<>());
+        this.transportService = transportService;
+        this.indicesService = indicesService;
+
+        transportService.registerRequestHandler(
+            Actions.GET_CHECKPOINT_INFO,
+            ThreadPool.Names.GENERIC,
+            CheckpointInfoRequest::new,
+            new CheckpointInfoRequestHandler()
+        );
+        transportService.registerRequestHandler(
+            Actions.GET_SEGMENT_FILES,
+            ThreadPool.Names.GENERIC,
+            GetSegmentFilesRequest::new,
+            new GetSegmentFilesRequestHandler()
+        );
+    }
+
+    private class CheckpointInfoRequestHandler implements TransportRequestHandler<CheckpointInfoRequest> {
+        @Override
+        public void messageReceived(CheckpointInfoRequest request, TransportChannel channel, Task task) throws Exception {
+            final ReplicationCheckpoint checkpoint = request.getCheckpoint();
+            logger.trace("Received request for checkpoint {}", checkpoint);
+            final CopyState copyState = getCachedCopyState(checkpoint);
+            channel.sendResponse(
+                new CheckpointInfoResponse(
+                    copyState.getCheckpoint(),
+                    copyState.getMetadataSnapshot(),
+                    copyState.getInfosBytes(),
+                    copyState.getPendingDeleteFiles()
+                )
+            );
+        }
+    }
+
+    class GetSegmentFilesRequestHandler implements TransportRequestHandler<GetSegmentFilesRequest> {
+        @Override
+        public void messageReceived(GetSegmentFilesRequest request, TransportChannel channel, Task task) throws Exception {
+            if (isInCopyStateMap(request.getCheckpoint())) {
+                // TODO send files
+            } else {
+                // Return an empty list of files
+                channel.sendResponse(new GetSegmentFilesResponse(Collections.emptyList()));
+            }
+        }
+    }
+
+    /**
+     * Operations on the {@link #copyStateMap} member.
+     */
+
+    /**
+     * A synchronized method that checks {@link #copyStateMap} for the given {@link ReplicationCheckpoint} key
+     * and returns the cached value if one is present. If the key is not present, a {@link CopyState}
+     * object is constructed and stored in the map before being returned.
+     */
+    private synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) throws IOException {
+        if (isInCopyStateMap(checkpoint)) {
+            final CopyState copyState = fetchFromCopyStateMap(checkpoint);
+            copyState.incRef();
+            return copyState;
+        } else {
+            // From the checkpoint's shard ID, fetch the IndexShard
+            ShardId shardId = checkpoint.getShardId();
+            final IndexService indexService = indicesService.indexService(shardId.getIndex());
+            final IndexShard indexShard = indexService.getShard(shardId.id());
+            // build the CopyState object and cache it before returning
+            final CopyState copyState = new CopyState(indexShard);
+
+            /**
+             * Use the checkpoint from the request as the key in the map, rather than
+             * the checkpoint from the created CopyState. This maximizes cache hits
+             * if replication targets make a request with an older checkpoint.
+             * Replication targets are expected to fetch the checkpoint in the response
+             * CopyState to bring themselves up to date.
+             */
+            addToCopyStateMap(checkpoint, copyState);
+            return copyState;
+        }
+    }
+
+    /**
+     * Adds the input {@link CopyState} object to {@link #copyStateMap}.
+     * The key is the CopyState's {@link ReplicationCheckpoint} object.
+     */
+    private void addToCopyStateMap(ReplicationCheckpoint checkpoint, CopyState copyState) {
+        copyStateMap.putIfAbsent(checkpoint, copyState);
+    }
+
+    /**
+     * Given a {@link ReplicationCheckpoint}, return the corresponding
+     * {@link CopyState} object, if any, from {@link #copyStateMap}.
+     */
+    private CopyState fetchFromCopyStateMap(ReplicationCheckpoint replicationCheckpoint) {
+        return copyStateMap.get(replicationCheckpoint);
+    }
+
+    /**
+     * Checks if the {@link #copyStateMap} has the input {@link ReplicationCheckpoint}
+     * as a key by invoking {@link Map#containsKey(Object)}.
+     */
+    private boolean isInCopyStateMap(ReplicationCheckpoint replicationCheckpoint) {
+        return copyStateMap.containsKey(replicationCheckpoint);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java
new file mode 100644
index 0000000000000..250df3481435a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.replication.common;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.store.ByteBuffersDataOutput;
+import org.apache.lucene.store.ByteBuffersIndexOutput;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.common.util.concurrent.AbstractRefCounted;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.store.Store;
+import org.opensearch.index.store.StoreFileMetadata;
+import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * An OpenSearch-specific version of Lucene's CopyState class that
+ * holds incRef'd file-level details for one point-in-time segment infos.
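+ *
+ * Consumers must balance every reference they acquire; a minimal sketch,
+ * assuming a started shard named {@code indexShard}:
+ * <pre>
+ * CopyState copyState = new CopyState(indexShard); // constructed with one reference
+ * try {
+ *     byte[] infosBytes = copyState.getInfosBytes();
+ * } finally {
+ *     copyState.decRef(); // closeInternal() runs once all references are released
+ * }
+ * </pre>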
+ *
+ * @opensearch.internal
+ */
+public class CopyState extends AbstractRefCounted {
+
+    private final GatedCloseable<SegmentInfos> segmentInfosRef;
+    private final ReplicationCheckpoint replicationCheckpoint;
+    private final Store.MetadataSnapshot metadataSnapshot;
+    private final HashSet<StoreFileMetadata> pendingDeleteFiles;
+    private final byte[] infosBytes;
+    private GatedCloseable<IndexCommit> commitRef;
+
+    public CopyState(IndexShard shard) throws IOException {
+        super("CopyState-" + shard.shardId());
+        this.segmentInfosRef = shard.getSegmentInfosSnapshot();
+        SegmentInfos segmentInfos = this.segmentInfosRef.get();
+        this.metadataSnapshot = shard.store().getMetadata(segmentInfos);
+        this.replicationCheckpoint = new ReplicationCheckpoint(
+            shard.shardId(),
+            shard.getOperationPrimaryTerm(),
+            segmentInfos.getGeneration(),
+            shard.getProcessedLocalCheckpoint(),
+            segmentInfos.getVersion()
+        );
+
+        // Send files that are merged away in the latest SegmentInfos but not in the latest on disk Segments_N.
+        // This ensures that the store on replicas is in sync with the store on primaries.
+        this.commitRef = shard.acquireLastIndexCommit(false);
+        Store.MetadataSnapshot metadata = shard.store().getMetadata(this.commitRef.get());
+        final Store.RecoveryDiff diff = metadata.recoveryDiff(this.metadataSnapshot);
+        this.pendingDeleteFiles = new HashSet<>(diff.missing);
+        if (this.pendingDeleteFiles.isEmpty()) {
+            // If there are no additional files we can release the last commit immediately.
+            this.commitRef.close();
+            this.commitRef = null;
+        }
+
+        ByteBuffersDataOutput buffer = new ByteBuffersDataOutput();
+        // resource description and name are not used, but resource description cannot be null
+        try (ByteBuffersIndexOutput indexOutput = new ByteBuffersIndexOutput(buffer, "", null)) {
+            segmentInfos.write(indexOutput);
+        }
+        this.infosBytes = buffer.toArrayCopy();
+    }
+
+    @Override
+    protected void closeInternal() {
+        try {
+            segmentInfosRef.close();
+            // commitRef may be null if there were no pending delete files
+            if (commitRef != null) {
+                commitRef.close();
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+    }
+
+    public ReplicationCheckpoint getCheckpoint() {
+        return replicationCheckpoint;
+    }
+
+    public Store.MetadataSnapshot getMetadataSnapshot() {
+        return metadataSnapshot;
+    }
+
+    public byte[] getInfosBytes() {
+        return infosBytes;
+    }
+
+    public Set<StoreFileMetadata> getPendingDeleteFiles() {
+        return pendingDeleteFiles;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java
new file mode 100644
index 0000000000000..db8206d131c13
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.replication.common;
+
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * Abstract base class for transport-layer requests related to segment replication.
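+ *
+ * Subclasses serialize their own fields after the base fields; the expected
+ * pattern, mirroring {@code CheckpointInfoRequest} elsewhere in this change, is:
+ * <pre>
+ * public void writeTo(StreamOutput out) throws IOException {
+ *     super.writeTo(out); // replicationId, targetAllocationId, targetNode
+ *     checkpoint.writeTo(out); // subclass-specific payload
+ * }
+ * </pre>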
+ * + * @opensearch.internal + */ +public abstract class SegmentReplicationTransportRequest extends TransportRequest { + + private final long replicationId; + private final String targetAllocationId; + private final DiscoveryNode targetNode; + + protected SegmentReplicationTransportRequest(long replicationId, String targetAllocationId, DiscoveryNode targetNode) { + this.replicationId = replicationId; + this.targetAllocationId = targetAllocationId; + this.targetNode = targetNode; + } + + protected SegmentReplicationTransportRequest(StreamInput in) throws IOException { + super(in); + this.replicationId = in.readLong(); + this.targetAllocationId = in.readString(); + this.targetNode = new DiscoveryNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(this.replicationId); + out.writeString(this.targetAllocationId); + targetNode.writeTo(out); + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java new file mode 100644 index 0000000000000..1c6d06e9bcc08 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; + +public class EngineConfigTests extends OpenSearchTestCase { + + private IndexSettings defaultIndexSettings; + + @Override + public void setUp() throws Exception { + super.setUp(); + final IndexMetadata defaultIndexMetadata = IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + defaultIndexSettings = IndexSettingsModule.newIndexSettings("test", defaultIndexMetadata.getSettings()); + } + + public void testEngineConfig_DefaultValueForReadOnlyEngine() { + EngineConfig config = new EngineConfig( + null, + null, + defaultIndexSettings, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + () -> RetentionLeases.EMPTY, + null, + null + ); + assertFalse(config.isReadOnlyReplica()); + } + + public void testEngineConfig_ReadOnlyEngineWithSegRepDisabled() { + expectThrows(IllegalArgumentException.class, () -> createReadOnlyEngine(defaultIndexSettings)); + } + + public void testEngineConfig_ReadOnlyEngineWithSegRepEnabled() { + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "test", + Settings.builder() + .put(defaultIndexSettings.getSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + EngineConfig engineConfig = createReadOnlyEngine(indexSettings); + assertTrue(engineConfig.isReadOnlyReplica()); + } + + private EngineConfig createReadOnlyEngine(IndexSettings indexSettings) { + return new EngineConfig( + null, + null, + indexSettings, + null, + null, + null, + null, + null, + null, + null, + null, + null, + 
null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            () -> RetentionLeases.EMPTY,
+            null,
+            null,
+            true
+        );
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
index cbae55a047a1e..b14ad15070118 100644
--- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
@@ -211,7 +211,9 @@
 import static org.hamcrest.Matchers.nullValue;
 import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.opensearch.index.engine.Engine.Operation.Origin.LOCAL_RESET;
@@ -7384,4 +7386,33 @@ public void testMaxDocsOnReplica() throws Exception {
             restoreIndexWriterMaxDocs();
         }
     }
+
+    public void testGetSegmentInfosSnapshot() throws IOException {
+        IOUtils.close(store, engine);
+        Store store = createStore();
+        InternalEngine engine = spy(createEngine(store, createTempDir()));
+        GatedCloseable<SegmentInfos> segmentInfosSnapshot = engine.getSegmentInfosSnapshot();
+        assertNotNull(segmentInfosSnapshot);
+        assertNotNull(segmentInfosSnapshot.get());
+        verify(engine, times(1)).getLatestSegmentInfos();
+        store.close();
+        engine.close();
+    }
+
+    public void testGetProcessedLocalCheckpoint() throws IOException {
+        final long expectedLocalCheckpoint = 1L;
+        IOUtils.close(store, engine);
+        // set up mock
+        final LocalCheckpointTracker mockCheckpointTracker = mock(LocalCheckpointTracker.class);
+        when(mockCheckpointTracker.getProcessedCheckpoint()).thenReturn(expectedLocalCheckpoint);
+
+        Store store = createStore();
+        InternalEngine engine = createEngine(store, createTempDir(), (a, b) -> mockCheckpointTracker);
+
+        long actualLocalCheckpoint = engine.getProcessedLocalCheckpoint();
+        assertEquals(expectedLocalCheckpoint, actualLocalCheckpoint);
+        verify(mockCheckpointTracker, atLeastOnce()).getProcessedCheckpoint();
+        store.close();
+        engine.close();
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java
index 2106c5e1067fb..da0db02ac402e 100644
--- a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java
@@ -107,6 +107,7 @@ public void testReadOnlyEngine() throws Exception {
                 lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get());
                 lastDocIds = getDocIds(engine, true);
                 assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint()));
+                assertThat(readOnlyEngine.getProcessedLocalCheckpoint(), equalTo(readOnlyEngine.getPersistedLocalCheckpoint()));
                 assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo()));
                 assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds));
                 for (int i = 0; i < numDocs; i++) {
@@ -131,6 +132,7 @@ public void testReadOnlyEngine() throws Exception {
             IOUtils.close(external, internal);
             // the locked down engine should still point to the previous commit
             assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint()));
+            assertThat(readOnlyEngine.getProcessedLocalCheckpoint(),
equalTo(readOnlyEngine.getPersistedLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { @@ -142,6 +144,7 @@ public void testReadOnlyEngine() throws Exception { recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); // the locked down engine should still point to the previous commit assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getProcessedLocalCheckpoint(), equalTo(readOnlyEngine.getPersistedLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); } diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index fdec86e7912fd..d99bde4764adf 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -364,14 +364,14 @@ public void testNewChecksums() throws IOException { Store.MetadataSnapshot metadata; // check before we committed try { - store.getMetadata(null); + store.getMetadata(); fail("no index present - expected exception"); } catch (IndexNotFoundException ex) { // expected } writer.commit(); writer.close(); - metadata = store.getMetadata(null); + metadata = store.getMetadata(); assertThat(metadata.asMap().isEmpty(), is(false)); for (StoreFileMetadata meta : metadata) { try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) { @@ -552,7 +552,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { } writer.commit(); writer.close(); - first = store.getMetadata(null); + first = store.getMetadata(); assertDeleteContent(store, store.directory()); store.close(); } @@ -581,7 +581,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { } writer.commit(); writer.close(); - second = store.getMetadata(null); + second = store.getMetadata(); } Store.RecoveryDiff diff = first.recoveryDiff(second); assertThat(first.size(), equalTo(second.size())); @@ -610,7 +610,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs)))); writer.commit(); writer.close(); - Store.MetadataSnapshot metadata = store.getMetadata(null); + Store.MetadataSnapshot metadata = store.getMetadata(); StoreFileMetadata delFile = null; for (StoreFileMetadata md : metadata) { if (md.name().endsWith(".liv")) { @@ -645,7 +645,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { writer.addDocument(docs.get(0)); writer.close(); - Store.MetadataSnapshot newCommitMetadata = store.getMetadata(null); + Store.MetadataSnapshot newCommitMetadata = store.getMetadata(); Store.RecoveryDiff newCommitDiff = newCommitMetadata.recoveryDiff(metadata); if (delFile != null) { assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 5)); // segments_N, del file, cfs, cfe, si for the @@ -710,7 +710,7 @@ public void testCleanupFromSnapshot() throws IOException { writer.addDocument(doc); } - Store.MetadataSnapshot firstMeta = store.getMetadata(null); + 
Store.MetadataSnapshot firstMeta = store.getMetadata();
 
         if (random().nextBoolean()) {
             for (int i = 0; i < docs; i++) {
@@ -731,7 +731,7 @@ public void testCleanupFromSnapshot() throws IOException {
         writer.commit();
         writer.close();
 
-        Store.MetadataSnapshot secondMeta = store.getMetadata(null);
+        Store.MetadataSnapshot secondMeta = store.getMetadata();
 
         if (randomBoolean()) {
             store.cleanupAndVerify("test", firstMeta);
@@ -1000,7 +1000,7 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {
 
         try {
             if (randomBoolean()) {
-                store.getMetadata(null);
+                store.getMetadata();
             } else {
                 store.readLastCommittedSegmentsInfo();
             }
@@ -1138,4 +1138,15 @@ public void testGetPendingFiles() throws IOException {
             }
         }
     }
+
+    public void testGetMetadataWithSegmentInfos() throws IOException {
+        final ShardId shardId = new ShardId("index", "_na_", 1);
+        Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId));
+        store.createEmpty(Version.LATEST);
+        SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory());
+        Store.MetadataSnapshot metadataSnapshot = store.getMetadata(segmentInfos);
+        // loose check for equality
+        assertEquals(segmentInfos.getSegmentsFileName(), metadataSnapshot.getSegmentsFile().name());
+        store.close();
+    }
 }
diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java
index bda2a910d922e..d85b2f1e22979 100644
--- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java
@@ -85,7 +85,7 @@ public void testWriteFileChunksConcurrently() throws Exception {
             indexDoc(sourceShard, "_doc", Integer.toString(i));
         }
         sourceShard.flush(new FlushRequest());
-        Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(null);
+        Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata();
         List<StoreFileMetadata> mdFiles = new ArrayList<>();
         for (StoreFileMetadata md : sourceSnapshot) {
             mdFiles.add(md);
diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java
index 1739f546150d9..fc5c429d74b16 100644
--- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -189,7 +189,7 @@ public void testSendFiles() throws Throwable {
         writer.commit();
         writer.close();
 
-        Store.MetadataSnapshot metadata = store.getMetadata(null);
+        Store.MetadataSnapshot metadata = store.getMetadata();
         ReplicationLuceneIndex luceneIndex = new ReplicationLuceneIndex();
         List<StoreFileMetadata> metas = new ArrayList<>();
         for (StoreFileMetadata md : metadata) {
@@ -226,7 +226,7 @@ public void writeFileChunk(
         PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
         handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
         sendFilesFuture.actionGet();
-        Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);
+        Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata();
         Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
         assertEquals(metas.size(), recoveryDiff.identical.size());
         assertEquals(0, recoveryDiff.different.size());
@@ -512,7 +512,7 @@ public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
         writer.close();
 
         ReplicationLuceneIndex luceneIndex = new ReplicationLuceneIndex();
-        Store.MetadataSnapshot metadata = store.getMetadata(null);
+        Store.MetadataSnapshot metadata = store.getMetadata();
         List<StoreFileMetadata> metas = new ArrayList<>();
         for (StoreFileMetadata md : metadata) {
             metas.add(md);
@@ -594,7 +594,7 @@ public void testHandleExceptionOnSendFiles() throws Throwable {
         writer.commit();
         writer.close();
 
-        Store.MetadataSnapshot metadata = store.getMetadata(null);
+        Store.MetadataSnapshot metadata = store.getMetadata();
         List<StoreFileMetadata> metas = new ArrayList<>();
         for (StoreFileMetadata md : metadata) {
             metas.add(md);
diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java
new file mode 100644
index 0000000000000..6bce74be569c3
--- /dev/null
+++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java
@@ -0,0 +1,139 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.replication;
+
+import org.apache.lucene.util.Version;
+import org.opensearch.action.ActionListener;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodeRole;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.internal.io.IOUtils;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.IndexShardTestCase;
+import org.opensearch.index.store.Store;
+import org.opensearch.index.store.StoreFileMetadata;
+import org.opensearch.indices.recovery.RecoverySettings;
+import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+import org.opensearch.test.ClusterServiceUtils;
+import org.opensearch.test.transport.CapturingTransport;
+import org.opensearch.transport.TransportService;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.mockito.Mockito.mock;
+
+public class PrimaryShardReplicationSourceTests extends IndexShardTestCase {
+
+    private static final long PRIMARY_TERM = 1L;
+    private static final long SEGMENTS_GEN = 2L;
+    private static final long SEQ_NO = 3L;
+    private static final long VERSION = 4L;
+    private static final long REPLICATION_ID = 123L;
+
+    private CapturingTransport transport;
+    private ClusterService clusterService;
+    private TransportService transportService;
+    private PrimaryShardReplicationSource replicationSource;
+    private IndexShard indexShard;
+    private DiscoveryNode sourceNode;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build();
+        final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
+        transport = new CapturingTransport();
+        sourceNode = newDiscoveryNode("sourceNode");
+        final DiscoveryNode localNode = newDiscoveryNode("localNode");
+        clusterService = ClusterServiceUtils.createClusterService(threadPool, localNode);
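+        // The CapturingTransport records outbound requests instead of sending
+        // them over the wire, so the tests below can assert on the exact
+        // action names and payloads that the replication source produces.
+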
transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + + indexShard = newStartedShard(true); + + replicationSource = new PrimaryShardReplicationSource( + localNode, + indexShard.routingEntry().allocationId().toString(), + transportService, + recoverySettings, + sourceNode + ); + } + + @Override + public void tearDown() throws Exception { + IOUtils.close(transportService, clusterService, transport); + closeShards(indexShard); + super.tearDown(); + } + + public void testGetCheckpointMetadata() { + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, mock(ActionListener.class)); + CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); + assertEquals(1, requestList.length); + CapturingTransport.CapturedRequest capturedRequest = requestList[0]; + assertEquals(SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO, capturedRequest.action); + assertEquals(sourceNode, capturedRequest.node); + assertTrue(capturedRequest.request instanceof CheckpointInfoRequest); + } + + public void testGetSegmentFiles() { + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + mock(ActionListener.class) + ); + CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); + assertEquals(1, requestList.length); + CapturingTransport.CapturedRequest capturedRequest = requestList[0]; + assertEquals(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES, capturedRequest.action); + assertEquals(sourceNode, capturedRequest.node); + assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); + } + + private DiscoveryNode newDiscoveryNode(String nodeName) { + return new DiscoveryNode( + nodeName, + randomAlphaOfLength(10), + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + org.opensearch.Version.CURRENT + ); + } +} diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java new file mode 100644 index 0000000000000..67c867d360e70 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.indices.replication;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.ShardId;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+import org.opensearch.indices.replication.common.CopyStateTests;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.test.transport.CapturingTransport;
+import org.opensearch.threadpool.TestThreadPool;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportException;
+import org.opensearch.transport.TransportResponseHandler;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SegmentReplicationSourceServiceTests extends OpenSearchTestCase {
+
+    private ShardId testShardId;
+    private ReplicationCheckpoint testCheckpoint;
+    private IndicesService mockIndicesService;
+    private IndexService mockIndexService;
+    private IndexShard mockIndexShard;
+    private TestThreadPool testThreadPool;
+    private CapturingTransport transport;
+    private TransportService transportService;
+    private DiscoveryNode localNode;
+    private SegmentReplicationSourceService segmentReplicationSourceService;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        // setup mocks
+        mockIndexShard = CopyStateTests.createMockIndexShard();
+        testShardId = mockIndexShard.shardId();
+        mockIndicesService = mock(IndicesService.class);
+        mockIndexService = mock(IndexService.class);
+        when(mockIndicesService.indexService(testShardId.getIndex())).thenReturn(mockIndexService);
+        when(mockIndexService.getShard(testShardId.id())).thenReturn(mockIndexShard);
+
+        // This mirrors the creation of the ReplicationCheckpoint inside CopyState
+        testCheckpoint = new ReplicationCheckpoint(
+            testShardId,
+            mockIndexShard.getOperationPrimaryTerm(),
+            0L,
+            mockIndexShard.getProcessedLocalCheckpoint(),
+            0L
+        );
+        testThreadPool = new TestThreadPool("test", Settings.EMPTY);
+        transport = new CapturingTransport();
+        localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
+        transportService = transport.createTransportService(
+            Settings.EMPTY,
+            testThreadPool,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            boundAddress -> localNode,
+            null,
+            Collections.emptySet()
+        );
+        transportService.start();
+        transportService.acceptIncomingRequests();
+        segmentReplicationSourceService = new SegmentReplicationSourceService(transportService, mockIndicesService);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        ThreadPool.terminate(testThreadPool, 30, TimeUnit.SECONDS);
+        testThreadPool = null;
+        super.tearDown();
+    }
+
+    public void testGetSegmentFiles_EmptyResponse() {
+        final GetSegmentFilesRequest request = new GetSegmentFilesRequest(
+            1,
+            "allocationId",
+            localNode,
+            Collections.emptyList(),
+            testCheckpoint
+        );
+        transportService.sendRequest(
+            localNode,
+            SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES,
+            request,
+            new TransportResponseHandler<GetSegmentFilesResponse>() {
+                @Override
+                public void handleResponse(GetSegmentFilesResponse response) {
+                    assertEquals(0, response.files.size());
+                }
+
+                @Override
+                public void handleException(TransportException e) {
+                    fail("unexpected exception: " + e);
+                }
+
+                @Override
+                public String executor() {
+                    return ThreadPool.Names.SAME;
+                }
+
+                @Override
+                public GetSegmentFilesResponse read(StreamInput in) throws IOException {
+                    return new GetSegmentFilesResponse(in);
+                }
+            }
+        );
+    }
+
+    public void testCheckpointInfo() {
+        final CheckpointInfoRequest request = new CheckpointInfoRequest(1L, "testAllocationId", localNode, testCheckpoint);
+        transportService.sendRequest(
+            localNode,
+            SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO,
+            request,
+            new TransportResponseHandler<CheckpointInfoResponse>() {
+                @Override
+                public void handleResponse(CheckpointInfoResponse response) {
+                    assertEquals(testCheckpoint, response.getCheckpoint());
+                    assertNotNull(response.getInfosBytes());
+                    // CopyStateTests sets up one pending delete file and one committed segments file
+                    assertEquals(1, response.getPendingDeleteFiles().size());
+                    assertEquals(1, response.getSnapshot().size());
+                }
+
+                @Override
+                public void handleException(TransportException e) {
+                    fail("unexpected exception: " + e);
+                }
+
+                @Override
+                public String executor() {
+                    return ThreadPool.Names.SAME;
+                }
+
+                @Override
+                public CheckpointInfoResponse read(StreamInput in) throws IOException {
+                    return new CheckpointInfoResponse(in);
+                }
+            }
+        );
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java
new file mode 100644
index 0000000000000..afa38afb0cf2f
--- /dev/null
+++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.replication.common;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.util.Version;
+import org.opensearch.common.collect.Map;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.IndexShardTestCase;
+import org.opensearch.index.shard.ShardId;
+import org.opensearch.index.store.Store;
+import org.opensearch.index.store.StoreFileMetadata;
+import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class CopyStateTests extends IndexShardTestCase {
+
+    private static final long EXPECTED_LONG_VALUE = 1L;
+    private static final ShardId TEST_SHARD_ID = new ShardId("testIndex", "testUUID", 0);
+    private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST);
+    private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST);
+
+    private static final Store.MetadataSnapshot COMMIT_SNAPSHOT = new Store.MetadataSnapshot(
+        Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE, PENDING_DELETE_FILE.name(), PENDING_DELETE_FILE),
+        null,
+        0
+    );
+
+    private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot(
+        Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE),
+        null,
+        0
+    );
+
+    public void testCopyStateCreation() throws IOException {
+        CopyState copyState = new CopyState(createMockIndexShard());
+        ReplicationCheckpoint checkpoint = copyState.getCheckpoint();
+        assertEquals(TEST_SHARD_ID, checkpoint.getShardId());
+        // version was never set so this should be zero
+        assertEquals(0, checkpoint.getSegmentInfosVersion());
+        assertEquals(EXPECTED_LONG_VALUE, checkpoint.getPrimaryTerm());
+
+        Set<StoreFileMetadata> pendingDeleteFiles = copyState.getPendingDeleteFiles();
+        assertEquals(1, pendingDeleteFiles.size());
+        assertTrue(pendingDeleteFiles.contains(PENDING_DELETE_FILE));
+    }
+
+    public static IndexShard createMockIndexShard() throws IOException {
+        IndexShard mockShard = mock(IndexShard.class);
+        when(mockShard.shardId()).thenReturn(TEST_SHARD_ID);
+        when(mockShard.getOperationPrimaryTerm()).thenReturn(EXPECTED_LONG_VALUE);
+        when(mockShard.getProcessedLocalCheckpoint()).thenReturn(EXPECTED_LONG_VALUE);
+
+        Store mockStore = mock(Store.class);
+        when(mockShard.store()).thenReturn(mockStore);
+
+        SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major);
+        when(mockShard.getSegmentInfosSnapshot()).thenReturn(new GatedCloseable<>(testSegmentInfos, () -> {}));
+        when(mockStore.getMetadata(testSegmentInfos)).thenReturn(SI_SNAPSHOT);
+
+        IndexCommit mockIndexCommit = mock(IndexCommit.class);
+        when(mockShard.acquireLastIndexCommit(false)).thenReturn(new GatedCloseable<>(mockIndexCommit, () -> {}));
+        when(mockStore.getMetadata(mockIndexCommit)).thenReturn(COMMIT_SNAPSHOT);
+        return mockShard;
+    }
+}

From feaa747a6bf8f2a614f8a8d0fbcfbca3ea962619 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 3 Jun 2022 10:54:40 -0700
Subject: [PATCH 15/34] [Dependency upgrade] google-oauth-client to 1.33.3
 (#3500)

Signed-off-by: Suraj Singh
---
 plugins/repository-gcs/build.gradle                             | 2 +-
 .../repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 | 1 -
 .../repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 | 1 +
 3 files changed, 2 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1

diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index 92ddc69c89f47..0e1c2125f5d81 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -73,7 +73,7 @@ dependencies {
   api 'com.google.cloud:google-cloud-core-http:1.93.3'
   api "com.google.auth:google-auth-library-credentials:${versions.google_auth}"
   api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}"
-  api 'com.google.oauth-client:google-oauth-client:1.33.1'
+  api 'com.google.oauth-client:google-oauth-client:1.33.3'
   api 'com.google.api-client:google-api-client:1.34.0'
   api 'com.google.http-client:google-http-client-appengine:1.41.8'
   api 'com.google.http-client:google-http-client-jackson2:1.35.0'
diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1
deleted file mode 100644
index 3897a85310ec6..0000000000000
--- a/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0a431f1a677c5f89507591ab47a7ccdb0b18b6f7
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1
new file mode 100644
index 0000000000000..f2afaa1bc2dba
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1
@@ -0,0 +1 @@
+9d445a8649b0de731922b9a3ebf1552b5403611d
\ No newline at end of file

From 2fbf33555ef2bb552af2ce1fe86b6ccdcf0c74ce Mon Sep 17 00:00:00 2001
From: Cole White <42356806+shdubsh@users.noreply.github.com>
Date: Fri, 3 Jun 2022 12:24:46 -0600
Subject: [PATCH 16/34] move bash flag to set statement (#3494)

Passing `bash` together with its flags as the first argument of
`/usr/bin/env` requires env's own `-S` flag to be interpreted correctly.
Rather than use `env -S` to split the argument, have the scripts run
`set -e` to enable the same behavior explicitly in the preinst and
postinst scripts. Also set `-o pipefail` for consistency.
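
On Linux, everything after the interpreter path in a shebang line is passed
to it as a single argument, so `#!/usr/bin/env bash -e` asks env to run a
program literally named "bash -e"; `env -S` would split that string, while a
`set` statement inside the script avoids depending on it at all.
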
Closes: #3492 Signed-off-by: Cole White --- buildSrc/src/main/resources/deb/postinst.ftl | 3 ++- buildSrc/src/main/resources/deb/preinst.ftl | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/resources/deb/postinst.ftl b/buildSrc/src/main/resources/deb/postinst.ftl index 605f620e16444..1fe98263a0fdf 100644 --- a/buildSrc/src/main/resources/deb/postinst.ftl +++ b/buildSrc/src/main/resources/deb/postinst.ftl @@ -1,2 +1,3 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e -o pipefail <% commands.each {command -> %><%= command %><% } %> diff --git a/buildSrc/src/main/resources/deb/preinst.ftl b/buildSrc/src/main/resources/deb/preinst.ftl index 605f620e16444..1fe98263a0fdf 100644 --- a/buildSrc/src/main/resources/deb/preinst.ftl +++ b/buildSrc/src/main/resources/deb/preinst.ftl @@ -1,2 +1,3 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e -o pipefail <% commands.each {command -> %><%= command %><% } %> From 6c769d4fca38f4333ec985d83044d3c56e425f71 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 6 Jun 2022 13:36:34 -0400 Subject: [PATCH 17/34] Support use of IRSA for repository-s3 plugin credentials: added YAML Rest test case (#3499) Signed-off-by: Andriy Redko --- plugins/repository-s3/build.gradle | 69 ++++- .../opensearch/repositories/s3/S3Service.java | 27 +- .../60_repository_eks_credentials.yml | 268 ++++++++++++++++++ test/fixtures/s3-fixture/Dockerfile.eks | 25 ++ test/fixtures/s3-fixture/docker-compose.yml | 17 ++ .../java/fixture/s3/S3HttpFixtureWithEKS.java | 103 +++++++ 6 files changed, 496 insertions(+), 13 deletions(-) create mode 100644 plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml create mode 100644 test/fixtures/s3-fixture/Dockerfile.eks create mode 100644 test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 54a2593f4c6f4..ff6e2148fab37 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -131,6 +131,9 @@ String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2") String s3ECSBucket = System.getenv("amazon_s3_bucket_ecs") String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs") +String s3EKSBucket = System.getenv("amazon_s3_bucket_eks") +String s3EKSBasePath = System.getenv("amazon_s3_base_path_eks") + boolean s3DisableChunkedEncoding = (new Random(Long.parseUnsignedLong(BuildParams.testSeed.tokenize(':').get(0), 16))).nextBoolean() // If all these variables are missing then we are testing against the internal fixture instead, which has the following @@ -160,13 +163,15 @@ if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3T throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present") } -if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { +if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath && !s3EKSBucket && !s3EKSBasePath) { s3EC2Bucket = 'ec2_bucket' s3EC2BasePath = 'ec2_base_path' s3ECSBucket = 'ecs_bucket' s3ECSBasePath = 'ecs_base_path' -} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath) { - throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") + s3EKSBucket = 'eks_bucket' + s3EKSBasePath = 'eks_base_path' +} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath || 
!s3EKSBucket || !s3EKSBasePath) { + throw new IllegalArgumentException("not all options specified to run EC2/ECS/EKS tests are present") } processYamlRestTestResources { @@ -179,7 +184,9 @@ processYamlRestTestResources { 'ec2_base_path': s3EC2BasePath, 'ecs_bucket': s3ECSBucket, 'ecs_base_path': s3ECSBasePath, - 'disable_chunked_encoding': s3DisableChunkedEncoding, + 'eks_bucket': s3EKSBucket, + 'eks_base_path': s3EKSBasePath, + 'disable_chunked_encoding': s3DisableChunkedEncoding ] inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) @@ -198,7 +205,8 @@ yamlRestTest { [ 'repository_s3/30_repository_temporary_credentials/*', 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' + 'repository_s3/50_repository_ecs_credentials/*', + 'repository_s3/60_repository_eks_credentials/*' ] ).join(",") } @@ -215,6 +223,7 @@ testClusters.yamlRestTest { testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture') testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-session-token') testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ec2') + testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-eks') normalization { runtimeClasspath { @@ -223,12 +232,21 @@ testClusters.yamlRestTest { } } + keystore 's3.client.integration_test_eks.role_arn', "arn:aws:iam::000000000000:role/test" + keystore 's3.client.integration_test_eks.role_session_name', "s3-test" + keystore 's3.client.integration_test_eks.access_key', "access_key" + keystore 's3.client.integration_test_eks.secret_key', "secret_key" + setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture', '80')}" }, IGNORE_VALUE setting 's3.client.integration_test_temporary.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-session-token', '80')}" }, IGNORE_VALUE setting 's3.client.integration_test_ec2.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE + setting 's3.client.integration_test_eks.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}" }, IGNORE_VALUE + setting 's3.client.integration_test_eks.region', { "us-east-2" }, IGNORE_VALUE // to redirect InstanceProfileCredentialsProvider to custom auth point systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE + // to redirect AWSSecurityTokenServiceClient to custom auth point + systemProperty "com.amazonaws.sdk.stsEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}/eks_credentials_endpoint" }, IGNORE_VALUE } else { println "Using an external service to test the repository-s3 plugin" } @@ -250,7 +268,8 @@ if (useFixture) { systemProperty 'tests.rest.denylist', [ 'repository_s3/30_repository_temporary_credentials/*', 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' + 'repository_s3/50_repository_ecs_credentials/*', + 'repository_s3/60_repository_eks_credentials/*' ].join(",") } check.dependsOn(yamlRestTestMinio) @@ -277,7 +296,8 @@ if (useFixture) { 'repository_s3/10_basic/*', 'repository_s3/20_repository_permanent_credentials/*', 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*' + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/60_repository_eks_credentials/*' ].join(",") } check.dependsOn(yamlRestTestECS) @@ 
-289,6 +309,41 @@ if (useFixture) {
     }
 }
 
+// EKS
+if (useFixture) {
+    testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-eks')
+    task yamlRestTestEKS(type: RestIntegTestTask.class) {
+        description = "Runs tests using the EKS repository."
+        dependsOn('bundlePlugin')
+        SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
+        SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME)
+        setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs())
+        setClasspath(yamlRestTestSourceSet.getRuntimeClasspath())
+        systemProperty 'tests.rest.denylist', [
+            'repository_s3/10_basic/*',
+            'repository_s3/20_repository_permanent_credentials/*',
+            'repository_s3/30_repository_temporary_credentials/*',
+            'repository_s3/40_repository_ec2_credentials/*',
+            'repository_s3/50_repository_ecs_credentials/*'
+        ].join(",")
+    }
+    check.dependsOn(yamlRestTestEKS)
+
+    testClusters.yamlRestTestEKS {
+        keystore 's3.client.integration_test_eks.role_arn', "arn:aws:iam::000000000000:role/test"
+        keystore 's3.client.integration_test_eks.role_session_name', "s3-test"
+        keystore 's3.client.integration_test_eks.access_key', "access_key"
+        keystore 's3.client.integration_test_eks.secret_key', "secret_key"
+
+        setting 's3.client.integration_test_eks.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}" }, IGNORE_VALUE
+        setting 's3.client.integration_test_eks.region', { "us-east-2" }, IGNORE_VALUE
+        plugin tasks.bundlePlugin.archiveFile
+
+        // to redirect AWSSecurityTokenServiceClient to custom auth point
+        systemProperty "com.amazonaws.sdk.stsEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}/eks_credentials_endpoint" }, IGNORE_VALUE
+    }
+}
+
 // 3rd Party Tests
 TaskProvider<Test> s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) {
     SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
index 6919549874445..18bb62944dede 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
@@ -41,6 +41,7 @@
 import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
 import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider;
 import com.amazonaws.client.builder.AwsClientBuilder;
+import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
 import com.amazonaws.http.IdleConnectionReaper;
 import com.amazonaws.http.SystemPropertyTlsKeyManagersProvider;
 import com.amazonaws.http.conn.ssl.SdkTLSSocketFactory;
@@ -82,6 +83,8 @@ class S3Service implements Closeable {
 
     private static final Logger logger = LogManager.getLogger(S3Service.class);
 
+    private static final String STS_ENDPOINT_OVERRIDE_SYSTEM_PROPERTY = "com.amazonaws.sdk.stsEndpointOverride";
+
     private volatile Map<String, AmazonS3Reference> clientsCache = emptyMap();
 
     /**
@@ -280,13 +283,25 @@ static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) {
         AWSSecurityTokenService securityTokenService = null;
         final String region = Strings.hasLength(clientSettings.region) ?
clientSettings.region : null; + if (region != null || basicCredentials != null) { - securityTokenService = SocketAccess.doPrivileged( - () -> AWSSecurityTokenServiceClientBuilder.standard() - .withCredentials((basicCredentials != null) ? new AWSStaticCredentialsProvider(basicCredentials) : null) - .withRegion(region) - .build() - ); + securityTokenService = SocketAccess.doPrivileged(() -> { + AWSSecurityTokenServiceClientBuilder builder = AWSSecurityTokenServiceClientBuilder.standard(); + + // Use similar approach to override STS endpoint as SDKGlobalConfiguration.EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY + final String stsEndpoint = System.getProperty(STS_ENDPOINT_OVERRIDE_SYSTEM_PROPERTY); + if (region != null && stsEndpoint != null) { + builder = builder.withEndpointConfiguration(new EndpointConfiguration(stsEndpoint, region)); + } else { + builder = builder.withRegion(region); + } + + if (basicCredentials != null) { + builder = builder.withCredentials(new AWSStaticCredentialsProvider(basicCredentials)); + } + + return builder.build(); + }); } if (irsaCredentials.getIdentityTokenFile() == null) { diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml new file mode 100644 index 0000000000000..15f2c9612a2ba --- /dev/null +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml @@ -0,0 +1,268 @@ +# Integration tests for repository-s3 + +--- +setup: + + # Register repository with eks credentials + - do: + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + bucket: ${eks_bucket} + client: integration_test_eks + base_path: "${eks_base_path}" + canned_acl: private + storage_class: standard + disable_chunked_encoding: ${disable_chunked_encoding} + +--- +"Snapshot and Restore with repository-s3 using eks credentials": + + # Get repository + - do: + snapshot.get_repository: + repository: repository_eks + + - match: { repository_eks.settings.bucket : ${eks_bucket} } + - match: { repository_eks.settings.client : "integration_test_eks" } + - match: { repository_eks.settings.base_path : "${eks_base_path}" } + - match: { repository_eks.settings.canned_acl : "private" } + - match: { repository_eks.settings.storage_class : "standard" } + - is_false: repository_eks.settings.access_key + - is_false: repository_eks.settings.secret_key + - is_false: repository_eks.settings.session_token + - is_false: repository_eks.settings.role_arn + - is_false: repository_eks.settings.role_session_name + - is_false: repository_eks.settings.identity_token_file + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _id: 1 + - snapshot: one + - index: + _index: docs + _id: 2 + - snapshot: one + - index: + _index: docs + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository_eks + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository_eks + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { 
snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _id: 4 + - snapshot: two + - index: + _index: docs + _id: 5 + - snapshot: two + - index: + _index: docs + _id: 6 + - snapshot: two + - index: + _index: docs + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository_eks + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository_eks + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository_eks + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository_eks + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository_eks + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository_eks + snapshot: snapshot-one + +--- +"Register a repository with a non existing bucket": + + - do: + catch: /repository_verification_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test_eks + +--- +"Register a repository with a non existing client": + + - do: + catch: /illegal_argument_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + bucket: repository_eks + client: unknown + +--- +"Register a read-only repository with a non existing bucket": + +- do: + catch: /repository_verification_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + readonly: true + bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test_eks + +--- +"Register a read-only repository with a non existing client": + +- do: + catch: /illegal_argument_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + readonly: true + bucket: repository_eks + client: unknown + +--- +"Get a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.get: + repository: repository_eks + snapshot: missing + +--- +"Delete a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.delete: + repository: repository_eks + snapshot: missing + +--- +"Restore a non existing snapshot": + + - do: + catch: /snapshot_restore_exception/ + snapshot.restore: + repository: repository_eks + snapshot: missing + wait_for_completion: true + +--- +teardown: + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository_eks diff --git a/test/fixtures/s3-fixture/Dockerfile.eks b/test/fixtures/s3-fixture/Dockerfile.eks new file mode 100644 index 0000000000000..d03960472a6a8 --- /dev/null +++ b/test/fixtures/s3-fixture/Dockerfile.eks @@ -0,0 +1,25 @@ +FROM ubuntu:18.04 + 
+RUN apt-get update -qqy +RUN apt-get install -qqy openjdk-11-jre-headless + +ARG fixtureClass +ARG port +ARG bucket +ARG basePath +ARG accessKey +ARG roleArn +ARG roleSessionName + +ENV S3_FIXTURE_CLASS=${fixtureClass} +ENV S3_FIXTURE_PORT=${port} +ENV S3_FIXTURE_BUCKET=${bucket} +ENV S3_FIXTURE_BASE_PATH=${basePath} +ENV S3_FIXTURE_ACCESS_KEY=${accessKey} +ENV S3_FIXTURE_ROLE_ARN=${roleArn} +ENV S3_FIXTURE_ROLE_SESSION_NAME=${roleSessionName} + +ENTRYPOINT exec java -classpath "/fixture/shared/*" \ + $S3_FIXTURE_CLASS 0.0.0.0 "$S3_FIXTURE_PORT" "$S3_FIXTURE_BUCKET" "$S3_FIXTURE_BASE_PATH" "$S3_FIXTURE_ACCESS_KEY" "$S3_FIXTURE_ROLE_ARN" "$S3_FIXTURE_ROLE_SESSION_NAME" + +EXPOSE $port diff --git a/test/fixtures/s3-fixture/docker-compose.yml b/test/fixtures/s3-fixture/docker-compose.yml index 22d101f41c318..d2b44f13c9530 100644 --- a/test/fixtures/s3-fixture/docker-compose.yml +++ b/test/fixtures/s3-fixture/docker-compose.yml @@ -92,3 +92,20 @@ services: - ./testfixtures_shared/shared:/fixture/shared ports: - "80" + + s3-fixture-with-eks: + build: + context: . + args: + fixtureClass: fixture.s3.S3HttpFixtureWithEKS + port: 80 + bucket: "eks_bucket" + basePath: "eks_base_path" + accessKey: "eks_access_key" + roleArn: "eks_role_arn" + roleSessionName: "eks_role_session_name" + dockerfile: Dockerfile.eks + volumes: + - ./testfixtures_shared/shared:/fixture/shared + ports: + - "80" diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java new file mode 100644 index 0000000000000..b26c82a3cb7d4 --- /dev/null +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package fixture.s3; + +import com.sun.net.httpserver.HttpHandler; +import org.opensearch.rest.RestStatus; + +import java.nio.charset.StandardCharsets; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.Objects; + +public class S3HttpFixtureWithEKS extends S3HttpFixture { + + private S3HttpFixtureWithEKS(final String[] args) throws Exception { + super(args); + } + + @Override + protected HttpHandler createHandler(final String[] args) { + final String accessKey = Objects.requireNonNull(args[4]); + final String eksRoleArn = Objects.requireNonNull(args[5]); + final HttpHandler delegate = super.createHandler(args); + + return exchange -> { + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html + if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getPath().startsWith("/eks_credentials_endpoint")) { + final byte[] response = buildCredentialResponse(eksRoleArn, accessKey).getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } + + delegate.handle(exchange); + }; + } + + protected String buildCredentialResponse(final String roleArn, final String accessKey) { + // See please: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + return "<AssumeRoleWithWebIdentityResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n" + + " <AssumeRoleWithWebIdentityResult>\n" + + " <SubjectFromWebIdentityToken>amzn1.account.AF6RHO7KZU5XRVQJGXK6HB56KR2A</SubjectFromWebIdentityToken>\n" + + " <Audience>client.5498841531868486423.1548@apps.example.com</Audience>\n" + + " <AssumedRoleUser>\n" + + " <Arn>" + roleArn + "</Arn>\n" + + " <AssumedRoleId>AROACLKWSDQRAOEXAMPLE:s3</AssumedRoleId>\n" + + " </AssumedRoleUser>\n" + + " <Credentials>\n" + + " <SessionToken>AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IEXAMPLE</SessionToken>\n" + + " <SecretAccessKey>wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY</SecretAccessKey>\n" + + " <Expiration>" + LocalDateTime.now().plusMonths(1).atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME) + "</Expiration>\n" + + " <AccessKeyId>" + accessKey + "</AccessKeyId>\n" + + " </Credentials>\n" + + " <SourceIdentity>SourceIdentityValue</SourceIdentity>\n" + + " <Provider>www.amazon.com</Provider>\n" + + " </AssumeRoleWithWebIdentityResult>\n" + + " <ResponseMetadata>\n" + + " <RequestId>ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE</RequestId>\n" + + " </ResponseMetadata>\n" + + "</AssumeRoleWithWebIdentityResponse>"; + } + + public static void main(final String[] args) throws Exception { + if (args == null || args.length < 6) { + throw new IllegalArgumentException("S3HttpFixtureWithEKS expects 6 arguments " + + "[address, port, bucket, base path, role arn, role session name]"); + } + final S3HttpFixtureWithEKS fixture = new S3HttpFixtureWithEKS(args); + fixture.start(); + } +} From e97a3ddd22e1db8b290a982a28f086d0a7413546 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 13:40:04 -0400 Subject: [PATCH 18/34] Bump azure-storage-common from 12.15.0 to 12.16.0 in /plugins/repository-azure (#3517) * Bump azure-storage-common in /plugins/repository-azure Bumps [azure-storage-common](https://github.com/Azure/azure-sdk-for-java) from 12.15.0 to 12.16.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.15.0...azure-storage-blob_12.16.0) --- updated-dependencies: - dependency-name: com.azure:azure-storage-common dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-common-12.15.0.jar.sha1 | 1 - .../licenses/azure-storage-common-12.16.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index dd2ad78ebed04..227d7d1b68977 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -45,7 +45,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.27.0' - api 'com.azure:azure-storage-common:12.15.0' + api 'com.azure:azure-storage-common:12.16.0' api 'com.azure:azure-core-http-netty:1.12.0' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 deleted file mode 100644 index 1f3adfc161c7f..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d63ce8bbd20379c5e5262b1204ceac7b31a7743 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 new file mode 100644 index 0000000000000..ebf328aa69ee8 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 @@ -0,0 +1 @@ +9f652b89a30269bdff6644468632726d4ba4fbd1 \ No newline at end of file From 37b48ef0beff0c27158f4dd413478f457e6b7db5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 13:54:56 -0400 Subject: [PATCH 19/34] Bump google-oauth-client from 1.33.3 to 1.34.0 in /plugins/discovery-gce (#3516) * Bump google-oauth-client from 1.33.3 to 1.34.0 in /plugins/discovery-gce Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.33.3 to 1.34.0. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.33.3...v1.34.0) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-gce/build.gradle | 2 +- .../discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 | 1 - .../discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index beae0d84685a4..983a2907e4e67 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.33.3" + api "com.google.oauth-client:google-oauth-client:1.34.0" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 deleted file mode 100644 index f2afaa1bc2dba..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d445a8649b0de731922b9a3ebf1552b5403611d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 new file mode 100644 index 0000000000000..57c5c16b34deb --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 @@ -0,0 +1 @@ +a0dc471bd498c62280120037a42d410c0e36f5d6 \ No newline at end of file From ab478ba5f33d45b98a3dd2b12038680d443cc1e1 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 6 Jun 2022 13:41:56 -0700 Subject: [PATCH 20/34] Fix the support of RestClient Node Sniffer for version 2.x and update tests (#3487) Fix the support of the RestClient Node Sniffer for OpenSearch 2.x, and update the unit tests for OpenSearch. The current code contains logic for compatibility with Elasticsearch 2.x, which conflicts with OpenSearch 2.x, so that part of the legacy code is removed. * Update the script create_test_nodes_info.bash to dump the response of the Nodes Info API GET _nodes/http for OpenSearch 1.0 and 2.0, which is used for the unit tests. * Remove the support of Elasticsearch version 2.x from the Sniffer * Update the unit tests to validate that the Sniffer is compatible with OpenSearch 1.x and 2.x * Update the API response parser to handle the array notation (in ES 6.1 and above) for the node attributes setting. As a result, the value of the `node.attr` setting will not be parsed as an array by the Sniffer when the Sniffer is used on a cluster running Elasticsearch 6.0 and above.
* Replace "master" node role with "cluster_manager" in unit test Signed-off-by: Tianli Feng --- .../client/sniff/OpenSearchNodesSniffer.java | 70 +----- .../OpenSearchNodesSnifferParseTests.java | 92 +++----- .../sniff/OpenSearchNodesSnifferTests.java | 16 +- ..._nodes_http.json => 1.0.0_nodes_http.json} | 79 ++++--- .../src/test/resources/2.0.0_nodes_http.json | 144 +++++++----- .../src/test/resources/5.0.0_nodes_http.json | 217 ------------------ .../src/test/resources/6.0.0_nodes_http.json | 217 ------------------ .../resources/create_test_nodes_info.bash | 56 +++-- 8 files changed, 191 insertions(+), 700 deletions(-) rename client/sniffer/src/test/resources/{7.3.0_nodes_http.json => 1.0.0_nodes_http.json} (77%) delete mode 100644 client/sniffer/src/test/resources/5.0.0_nodes_http.json delete mode 100644 client/sniffer/src/test/resources/6.0.0_nodes_http.json diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index 2829439627dbc..c1a0fcf9a8acf 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -49,6 +49,7 @@ import java.io.InputStream; import java.net.URI; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -241,74 +242,23 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th } Map> realAttributes = new HashMap<>(protoAttributes.size()); - List keys = new ArrayList<>(protoAttributes.keySet()); - for (String key : keys) { - if (key.endsWith(".0")) { - String realKey = key.substring(0, key.length() - 2); - List values = new ArrayList<>(); - int i = 0; - while (true) { - String value = protoAttributes.remove(realKey + "." + i); - if (value == null) { - break; - } - values.add(value); - i++; - } - realAttributes.put(realKey, unmodifiableList(values)); - } - } for (Map.Entry entry : protoAttributes.entrySet()) { - realAttributes.put(entry.getKey(), singletonList(entry.getValue())); - } - - if (version.startsWith("2.")) { - /* - * 2.x doesn't send roles, instead we try to read them from - * attributes. - */ - boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false); - Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null); - Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null); - if ((masterAttribute == null && false == clientAttribute) || masterAttribute) { - roles.add("master"); + if (entry.getValue().startsWith("[")) { + // Convert string array to list + String value = entry.getValue(); + String[] values = value.substring(1, value.length() - 1).split(", "); + realAttributes.put(entry.getKey(), unmodifiableList(Arrays.asList(values))); + } else { + realAttributes.put(entry.getKey(), singletonList(entry.getValue())); } - if ((dataAttribute == null && false == clientAttribute) || dataAttribute) { - roles.add("data"); - } - } else { - assert sawRoles : "didn't see roles for [" + nodeId + "]"; } + + assert sawRoles : "didn't see roles for [" + nodeId + "]"; assert boundHosts.contains(publishedHost) : "[" + nodeId + "] doesn't make sense! 
publishedHost should be in boundHosts"; logger.trace("adding node [" + nodeId + "]"); return new Node(publishedHost, boundHosts, name, version, new Roles(roles), unmodifiableMap(realAttributes)); } - /** - * Returns {@code defaultValue} if the attribute didn't come back, - * {@code true} or {@code false} if it did come back as - * either of those, or throws an IOException if the attribute - * came back in a strange way. - */ - private static Boolean v2RoleAttributeValue(Map> attributes, String name, Boolean defaultValue) - throws IOException { - List valueList = attributes.remove(name); - if (valueList == null) { - return defaultValue; - } - if (valueList.size() != 1) { - throw new IOException("expected only a single attribute value for [" + name + "] but got " + valueList); - } - switch (valueList.get(0)) { - case "true": - return true; - case "false": - return false; - default: - throw new IOException("expected [" + name + "] to be either [true] or [false] but was [" + valueList.get(0) + "]"); - } - } - /** * The supported host schemes. */ diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java index a9ff47eab5366..58b60ac13dee8 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java @@ -45,8 +45,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.Arrays; -import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -85,59 +85,31 @@ private void checkFile(String file, Node... 
expected) throws IOException { } } - public void test2x() throws IOException { - checkFile( - "2.0.0_nodes_http.json", - node(9200, "m1", "2.0.0", true, false, false), - node(9201, "m2", "2.0.0", true, true, false), - node(9202, "m3", "2.0.0", true, false, false), - node(9203, "d1", "2.0.0", false, true, false), - node(9204, "d2", "2.0.0", false, true, false), - node(9205, "d3", "2.0.0", false, true, false), - node(9206, "c1", "2.0.0", false, false, false), - node(9207, "c2", "2.0.0", false, false, false) - ); - } - - public void test5x() throws IOException { - checkFile( - "5.0.0_nodes_http.json", - node(9200, "m1", "5.0.0", true, false, true), - node(9201, "m2", "5.0.0", true, true, true), - node(9202, "m3", "5.0.0", true, false, true), - node(9203, "d1", "5.0.0", false, true, true), - node(9204, "d2", "5.0.0", false, true, true), - node(9205, "d3", "5.0.0", false, true, true), - node(9206, "c1", "5.0.0", false, false, true), - node(9207, "c2", "5.0.0", false, false, true) - ); - } - - public void test6x() throws IOException { + public void test1x() throws IOException { checkFile( - "6.0.0_nodes_http.json", - node(9200, "m1", "6.0.0", true, false, true), - node(9201, "m2", "6.0.0", true, true, true), - node(9202, "m3", "6.0.0", true, false, true), - node(9203, "d1", "6.0.0", false, true, true), - node(9204, "d2", "6.0.0", false, true, true), - node(9205, "d3", "6.0.0", false, true, true), - node(9206, "c1", "6.0.0", false, false, true), - node(9207, "c2", "6.0.0", false, false, true) + "1.0.0_nodes_http.json", + node(9200, "m1", "1.0.0", "master", "ingest"), + node(9201, "m2", "1.0.0", "master", "data", "ingest"), + node(9202, "m3", "1.0.0", "master", "ingest"), + node(9203, "d1", "1.0.0", "data", "ingest"), + node(9204, "d2", "1.0.0", "data", "ingest"), + node(9205, "d3", "1.0.0", "data", "ingest"), + node(9206, "c1", "1.0.0", "ingest"), + node(9207, "c2", "1.0.0", "ingest") ); } - public void test7x() throws IOException { + public void test2x() throws IOException { checkFile( - "7.3.0_nodes_http.json", - node(9200, "m1", "7.3.0", "master", "ingest"), - node(9201, "m2", "7.3.0", "master", "data", "ingest"), - node(9202, "m3", "7.3.0", "master", "ingest"), - node(9203, "d1", "7.3.0", "data", "ingest", "ml"), - node(9204, "d2", "7.3.0", "data", "ingest"), - node(9205, "d3", "7.3.0", "data", "ingest"), - node(9206, "c1", "7.3.0", "ingest"), - node(9207, "c2", "7.3.0", "ingest") + "2.0.0_nodes_http.json", + node(9200, "m1", "2.0.0", "cluster_manager", "ingest"), + node(9201, "m2", "2.0.0", "cluster_manager", "data", "ingest"), + node(9202, "m3", "2.0.0", "cluster_manager", "ingest"), + node(9203, "d1", "2.0.0", "data", "ingest"), + node(9204, "d2", "2.0.0", "data", "ingest"), + node(9205, "d3", "2.0.0", "data", "ingest"), + node(9206, "c1", "2.0.0", "ingest"), + node(9207, "c2", "2.0.0", "ingest") ); } @@ -163,20 +135,6 @@ public void testParsingPublishAddressWithES7Format() throws IOException { assertEquals("http", nodes.get(0).getHost().getSchemeName()); } - private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { - final Set roles = new TreeSet<>(); - if (master) { - roles.add("master"); - } - if (data) { - roles.add("data"); - } - if (ingest) { - roles.add("ingest"); - } - return node(port, name, version, roles); - } - private Node node(int port, String name, String version, String... 
roles) { return node(port, name, version, new TreeSet<>(Arrays.asList(roles))); } @@ -184,11 +142,15 @@ private Node node(int port, String name, String version, String... roles) { private Node node(int port, String name, String version, Set roles) { HttpHost host = new HttpHost("127.0.0.1", port); Set boundHosts = new HashSet<>(2); - boundHosts.add(host); boundHosts.add(new HttpHost("[::1]", port)); - Map> attributes = new HashMap<>(); + boundHosts.add(host); + Map> attributes = new LinkedHashMap<>(); // LinkedHashMap to preserve insertion order attributes.put("dummy", singletonList("everyone_has_me")); attributes.put("number", singletonList(name.substring(1))); + if (!version.startsWith("1.0") && !version.startsWith("1.1")) { + // Shard Indexing Pressure feature is added in version 1.2.0 + attributes.put("shard_indexing_pressure_enabled", singletonList(Boolean.TRUE.toString())); + } attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1))); return new Node(host, boundHosts, name, version, new Roles(new TreeSet<>(roles)), attributes); } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 993844524c2d1..8cc6f5f006861 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -234,7 +234,7 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc final Set nodeRoles = new TreeSet<>(); if (randomBoolean()) { - nodeRoles.add("master"); + nodeRoles.add("cluster_manager"); } if (randomBoolean()) { nodeRoles.add("data"); @@ -283,12 +283,12 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc generator.writeEndObject(); } - List roles = Arrays.asList(new String[] { "master", "data", "ingest" }); + List roles = Arrays.asList(new String[] { "cluster_manager", "data", "ingest" }); Collections.shuffle(roles, getRandom()); generator.writeArrayFieldStart("roles"); for (String role : roles) { - if ("master".equals(role) && node.getRoles().isMasterEligible()) { - generator.writeString("master"); + if ("cluster_manager".equals(role) && node.getRoles().isMasterEligible()) { + generator.writeString("cluster_manager"); } if ("data".equals(role) && node.getRoles().isData()) { generator.writeString("data"); @@ -307,13 +307,7 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc if (numAttributes > 0) { generator.writeObjectFieldStart("attributes"); for (Map.Entry> entry : attributes.entrySet()) { - if (entry.getValue().size() == 1) { - generator.writeStringField(entry.getKey(), entry.getValue().get(0)); - } else { - for (int v = 0; v < entry.getValue().size(); v++) { - generator.writeStringField(entry.getKey() + "." 
+ v, entry.getValue().get(v)); - } - } + generator.writeStringField(entry.getKey(), entry.getValue().toString()); } generator.writeEndObject(); } diff --git a/client/sniffer/src/test/resources/7.3.0_nodes_http.json b/client/sniffer/src/test/resources/1.0.0_nodes_http.json similarity index 77% rename from client/sniffer/src/test/resources/7.3.0_nodes_http.json rename to client/sniffer/src/test/resources/1.0.0_nodes_http.json index 9e85511fadb62..5557f0c7955c2 100644 --- a/client/sniffer/src/test/resources/7.3.0_nodes_http.json +++ b/client/sniffer/src/test/resources/1.0.0_nodes_http.json @@ -11,17 +11,17 @@ "transport_address": "127.0.0.1:9300", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ - "master", - "ingest" + "ingest", + "master" ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "m", - "array.1": "1" + "array": "[m, 1]" }, "http": { "bound_address": [ @@ -37,18 +37,18 @@ "transport_address": "127.0.0.1:9301", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ - "master", "data", - "ingest" + "ingest", + "master" ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "m", - "array.1": "2" + "array": "[m, 2]" }, "http": { "bound_address": [ @@ -64,17 +64,17 @@ "transport_address": "127.0.0.1:9302", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ - "master", - "ingest" + "ingest", + "master" ], "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "m", - "array.1": "3" + "array": "[m, 3]" }, "http": { "bound_address": [ @@ -90,18 +90,17 @@ "transport_address": "127.0.0.1:9303", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "data", - "ingest", - "ml" + "ingest" ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "d", - "array.1": "1" + "array": "[d, 1]" }, "http": { "bound_address": [ @@ -117,8 +116,9 @@ "transport_address": "127.0.0.1:9304", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "data", "ingest" @@ -126,8 +126,7 @@ "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "d", - "array.1": "2" + "array": "[d, 2]" }, "http": { "bound_address": [ @@ -143,8 +142,9 @@ "transport_address": "127.0.0.1:9305", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "data", "ingest" @@ -152,8 +152,7 @@ "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "d", - "array.1": "3" + "array": "[d, 3]" }, "http": { "bound_address": [ @@ -169,16 +168,16 @@ "transport_address": "127.0.0.1:9306", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": 
"34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "ingest" ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "c", - "array.1": "1" + "array": "[c, 1]" }, "http": { "bound_address": [ @@ -194,16 +193,16 @@ "transport_address": "127.0.0.1:9307", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "ingest" ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "c", - "array.1": "2" + "array": "[c, 2]" }, "http": { "bound_address": [ diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json index 4e8dbbcba58c4..e1b75d460d7d9 100644 --- a/client/sniffer/src/test/resources/2.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -1,4 +1,9 @@ { + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, "cluster_name": "opensearch", "nodes": { "qr-SOrELSaGW8SlU8nflBw": { @@ -7,20 +12,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9200", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "cluster_manager", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "m", - "data": "false", - "array.1": "1", - "master": "true" + "array": "[m, 1]", + "shard_indexing_pressure_enabled": "true" }, "http": { "bound_address": [ - "127.0.0.1:9200", - "[::1]:9200" + "[::1]:9200", + "127.0.0.1:9200" ], "publish_address": "127.0.0.1:9200", "max_content_length_in_bytes": 104857600 @@ -32,19 +39,23 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9201", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "cluster_manager", + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "m", - "array.1": "2", - "master": "true" + "shard_indexing_pressure_enabled": "true", + "array": "[m, 2]" }, "http": { "bound_address": [ - "127.0.0.1:9201", - "[::1]:9201" + "[::1]:9201", + "127.0.0.1:9201" ], "publish_address": "127.0.0.1:9201", "max_content_length_in_bytes": 104857600 @@ -56,20 +67,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9202", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "cluster_manager", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "m", - "data": "false", - "array.1": "3", - "master": "true" + "shard_indexing_pressure_enabled": "true", + "array": "[m, 3]" }, "http": { "bound_address": [ - "127.0.0.1:9202", - "[::1]:9202" + "[::1]:9202", + "127.0.0.1:9202" ], "publish_address": "127.0.0.1:9202", "max_content_length_in_bytes": 104857600 @@ -81,19 +94,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9203", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "d", - "array.1": "1", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[d, 1]" }, "http": { "bound_address": [ - "127.0.0.1:9203", - "[::1]:9203" + "[::1]:9203", + 
"127.0.0.1:9203" ], "publish_address": "127.0.0.1:9203", "max_content_length_in_bytes": 104857600 @@ -105,19 +121,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9204", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "d", - "array.1": "2", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[d, 2]" }, "http": { "bound_address": [ - "127.0.0.1:9204", - "[::1]:9204" + "[::1]:9204", + "127.0.0.1:9204" ], "publish_address": "127.0.0.1:9204", "max_content_length_in_bytes": 104857600 @@ -129,19 +148,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9205", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "d", - "array.1": "3", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[d, 3]" }, "http": { "bound_address": [ - "127.0.0.1:9205", - "[::1]:9205" + "[::1]:9205", + "127.0.0.1:9205" ], "publish_address": "127.0.0.1:9205", "max_content_length_in_bytes": 104857600 @@ -153,20 +175,21 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9206", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "c", - "data": "false", - "array.1": "1", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[c, 1]" }, "http": { "bound_address": [ - "127.0.0.1:9206", - "[::1]:9206" + "[::1]:9206", + "127.0.0.1:9206" ], "publish_address": "127.0.0.1:9206", "max_content_length_in_bytes": 104857600 @@ -178,20 +201,21 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9207", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "c", - "data": "false", - "array.1": "2", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[c, 2]" }, "http": { "bound_address": [ - "127.0.0.1:9207", - "[::1]:9207" + "[::1]:9207", + "127.0.0.1:9207" ], "publish_address": "127.0.0.1:9207", "max_content_length_in_bytes": 104857600 diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json deleted file mode 100644 index 4eb0443bc09d8..0000000000000 --- a/client/sniffer/src/test/resources/5.0.0_nodes_http.json +++ /dev/null @@ -1,217 +0,0 @@ -{ - "_nodes": { - "total": 8, - "successful": 8, - "failed": 0 - }, - "cluster_name": "opensearch", - "nodes": { - "0S4r3NurTYSFSb8R9SxwWA": { - "name": "m1", - "transport_address": "127.0.0.1:9300", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "m", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9200", - "127.0.0.1:9200" - ], - "publish_address": "127.0.0.1:9200", - "max_content_length_in_bytes": 104857600 - } - }, - "k_CBrMXARkS57Qb5-3Mw5g": { - "name": "m2", - 
"transport_address": "127.0.0.1:9301", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "master", - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "m", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9201", - "127.0.0.1:9201" - ], - "publish_address": "127.0.0.1:9201", - "max_content_length_in_bytes": 104857600 - } - }, - "6eynRPQ1RleJTeGDuTR9mw": { - "name": "m3", - "transport_address": "127.0.0.1:9302", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "m", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address": "127.0.0.1:9202", - "max_content_length_in_bytes": 104857600 - } - }, - "cbGC-ay1QNWaESvEh5513w": { - "name": "d1", - "transport_address": "127.0.0.1:9303", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "d", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9203", - "127.0.0.1:9203" - ], - "publish_address": "127.0.0.1:9203", - "max_content_length_in_bytes": 104857600 - } - }, - "LexndPpXR2ytYsU5fTElnQ": { - "name": "d2", - "transport_address": "127.0.0.1:9304", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "d", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9204", - "127.0.0.1:9204" - ], - "publish_address": "127.0.0.1:9204", - "max_content_length_in_bytes": 104857600 - } - }, - "SbNG1DKYSBu20zfOz2gDZQ": { - "name": "d3", - "transport_address": "127.0.0.1:9305", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "d", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9205", - "127.0.0.1:9205" - ], - "publish_address": "127.0.0.1:9205", - "max_content_length_in_bytes": 104857600 - } - }, - "fM4H-m2WTDWmsGsL7jIJew": { - "name": "c1", - "transport_address": "127.0.0.1:9306", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "c", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9206", - "127.0.0.1:9206" - ], - "publish_address": "127.0.0.1:9206", - "max_content_length_in_bytes": 104857600 - } - }, - "pFoh7d0BTbqqI3HKd9na5A": { - "name": "c2", - "transport_address": "127.0.0.1:9307", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "c", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9207", - "127.0.0.1:9207" - ], - "publish_address": "127.0.0.1:9207", - "max_content_length_in_bytes": 104857600 - } - } - } -} diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json deleted file mode 100644 index adc8f535d6aad..0000000000000 
--- a/client/sniffer/src/test/resources/6.0.0_nodes_http.json +++ /dev/null @@ -1,217 +0,0 @@ -{ - "_nodes": { - "total": 8, - "successful": 8, - "failed": 0 - }, - "cluster_name": "opensearch", - "nodes": { - "ikXK_skVTfWkhONhldnbkw": { - "name": "m1", - "transport_address": "127.0.0.1:9300", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "m", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9200", - "127.0.0.1:9200" - ], - "publish_address": "127.0.0.1:9200", - "max_content_length_in_bytes": 104857600 - } - }, - "TMHa34w4RqeuYoHCfJGXZg": { - "name": "m2", - "transport_address": "127.0.0.1:9301", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "master", - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "m", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9201", - "127.0.0.1:9201" - ], - "publish_address": "127.0.0.1:9201", - "max_content_length_in_bytes": 104857600 - } - }, - "lzaMRJTVT166sgVZdQ5thA": { - "name": "m3", - "transport_address": "127.0.0.1:9302", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "m", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address": "127.0.0.1:9202", - "max_content_length_in_bytes": 104857600 - } - }, - "tGP5sUecSd6BLTWk1NWF8Q": { - "name": "d1", - "transport_address": "127.0.0.1:9303", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "d", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9203", - "127.0.0.1:9203" - ], - "publish_address": "127.0.0.1:9203", - "max_content_length_in_bytes": 104857600 - } - }, - "c1UgW5ROTkSa2YnM_T56tw": { - "name": "d2", - "transport_address": "127.0.0.1:9304", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "d", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9204", - "127.0.0.1:9204" - ], - "publish_address": "127.0.0.1:9204", - "max_content_length_in_bytes": 104857600 - } - }, - "QM9yjqjmS72MstpNYV_trg": { - "name": "d3", - "transport_address": "127.0.0.1:9305", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "d", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9205", - "127.0.0.1:9205" - ], - "publish_address": "127.0.0.1:9205", - "max_content_length_in_bytes": 104857600 - } - }, - "wLtzAssoQYeX_4TstgCj0Q": { - "name": "c1", - "transport_address": "127.0.0.1:9306", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "c", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9206", - "127.0.0.1:9206" - ], - 
"publish_address": "127.0.0.1:9206", - "max_content_length_in_bytes": 104857600 - } - }, - "ONOzpst8TH-ZebG7fxGwaA": { - "name": "c2", - "transport_address": "127.0.0.1:9307", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "c", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9207", - "127.0.0.1:9207" - ], - "publish_address": "127.0.0.1:9207", - "max_content_length_in_bytes": 104857600 - } - } - } -} diff --git a/client/sniffer/src/test/resources/create_test_nodes_info.bash b/client/sniffer/src/test/resources/create_test_nodes_info.bash index 06350be4ba205..78e67562d815b 100644 --- a/client/sniffer/src/test/resources/create_test_nodes_info.bash +++ b/client/sniffer/src/test/resources/create_test_nodes_info.bash @@ -21,15 +21,11 @@ work=$(mktemp -d) pushd ${work} >> /dev/null echo Working in ${work} -wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz -wget https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz -wget https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz -sha1sum -c - << __SHAs -e369d8579bd3a2e8b5344278d5043f19f14cac88 elasticsearch-2.0.0.tar.gz -d25f6547bccec9f0b5ea7583815f96a6f50849e0 elasticsearch-5.0.0.tar.gz -__SHAs +wget https://artifacts.opensearch.org/releases/core/opensearch/1.0.0/opensearch-min-1.0.0-linux-x64.tar.gz +wget https://artifacts.opensearch.org/releases/core/opensearch/2.0.0/opensearch-min-2.0.0-linux-x64.tar.gz sha512sum -c - << __SHAs -25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c elasticsearch-6.0.0.tar.gz +96595cd3b173188d8a3f0f18d7bfa2457782839d06b519f01a99b4dc0280f81b08ba1d01bd1aef454feaa574cbbd04d3ad9a1f6a829182627e914f3e58f2899f opensearch-min-1.0.0-linux-x64.tar.gz +5b91456a2eb517bc48f13bec0a3f9c220494bd5fe979946dce6cfc3fa7ca00b003927157194d62f2a1c36c850eda74c70b93fbffa91bb082b2e1a17985d50976 opensearch-min-2.0.0-linux-x64.tar.gz __SHAs @@ -40,37 +36,38 @@ function do_version() { mkdir -p ${version} pushd ${version} >> /dev/null - tar xf ../opensearch-${version}.tar.gz + tar xf ../opensearch-min-${version}-linux-x64.tar.gz local http_port=9200 for node in ${nodes}; do mkdir ${node} cp -r opensearch-${version}/* ${node} - local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false) - local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false) - # m2 is always master and data for these test just so we have a node like that - data=$([[ "$node" == 'm2' ]] && echo true || echo ${data}) - local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr') + local cluster_manager=$([[ "$node" =~ ^m.* ]] && echo 'cluster_manager,' || echo '') + # 'cluster_manager' role is add in version 2.x and above, use 'master' role in 1.x + cluster_manager=$([[ ! "$cluster_manager" == '' && ${version} =~ ^1\. ]] && echo 'master,' || echo ${cluster_manager}) + local data=$([[ "$node" =~ ^d.* ]] && echo 'data,' || echo '') + # m2 is always cluster_manager and data for these test just so we have a node like that + data=$([[ "$node" == 'm2' ]] && echo 'data,' || echo ${data}) + # setting name 'cluster.initial_cluster_manager_nodes' is add in version 2.x and above + local initial_cluster_manager_nodes=$([[ ${version} =~ ^1\. 
]] && echo 'initial_master_nodes' || echo 'initial_cluster_manager_nodes') local transport_port=$((http_port+100)) - cat >> ${node}/config/opensearch.yml << __ES_YML + cat >> ${node}/config/opensearch.yml << __OPENSEARCH_YML node.name: ${node} -node.master: ${master} -node.data: ${data} -node${attr}.dummy: everyone_has_me -node${attr}.number: ${node:1} -node${attr}.array: [${node:0:1}, ${node:1}] +node.roles: [${cluster_manager} ${data} ingest] +node.attr.dummy: everyone_has_me +node.attr.number: ${node:1} +node.attr.array: [${node:0:1}, ${node:1}] http.port: ${http_port} transport.tcp.port: ${transport_port} -discovery.zen.minimum_master_nodes: 3 -discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302'] -__ES_YML +cluster.${initial_cluster_manager_nodes}: [m1, m2, m3] +discovery.seed_hosts: ['localhost:9300','localhost:9301','localhost:9302'] +__OPENSEARCH_YML - if [ ${version} != '2.0.0' ]; then - perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options - fi + # configure the JVM heap size + perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options echo "starting ${version}/${node}..." - ${node}/bin/opensearch -d -p ${node}/pidfile + ${node}/bin/opensearch -d -p pidfile ((http_port++)) done @@ -99,9 +96,8 @@ __ES_YML popd >> /dev/null } -JAVA_HOME=$JAVA8_HOME do_version 2.0.0 -JAVA_HOME=$JAVA8_HOME do_version 5.0.0 -JAVA_HOME=$JAVA8_HOME do_version 6.0.0 +JAVA_HOME=$JAVA11_HOME do_version 1.0.0 +JAVA_HOME=$JAVA11_HOME do_version 2.0.0 popd >> /dev/null rm -rf ${work} From 16766288f5061c4f526af8151ffaf9214d4e0fb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 12:19:08 -0700 Subject: [PATCH 21/34] Bump com.diffplug.spotless from 6.6.1 to 6.7.0 (#3513) Bumps com.diffplug.spotless from 6.6.1 to 6.7.0. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 9f2c483fa8de0..8b32d3393fe81 100644 --- a/build.gradle +++ b/build.gradle @@ -48,7 +48,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.6.1" apply false + id "com.diffplug.spotless" version "6.7.0" apply false id "org.gradle.test-retry" version "1.4.0" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From 811a58085382a5551f3371efd3e94a8d982ec408 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 17:05:19 -0700 Subject: [PATCH 22/34] Bump guava from 18.0 to 23.0 in /plugins/ingest-attachment (#3357) * Bump guava from 18.0 to 23.0 in /plugins/ingest-attachment Bumps [guava](https://github.com/google/guava) from 18.0 to 23.0. - [Release notes](https://github.com/google/guava/releases) - [Commits](https://github.com/google/guava/compare/v18.0...v23.0) --- updated-dependencies: - dependency-name: com.google.guava:guava dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Add more ignores for uses of the internal Java API sun.misc.Unsafe Signed-off-by: Tianli Feng Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Tianli Feng --- plugins/ingest-attachment/build.gradle | 14 ++++++++++++-- .../ingest-attachment/licenses/guava-18.0.jar.sha1 | 1 - .../ingest-attachment/licenses/guava-23.0.jar.sha1 | 1 + 3 files changed, 13 insertions(+), 3 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 1452d871a605b..456b652ff82a3 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -54,7 +54,7 @@ dependencies { api "org.apache.tika:tika-langdetect-optimaize:${versions.tika}" // Optimaize libraries/dependencies runtimeOnly "com.optimaize.languagedetector:language-detector:0.6" - runtimeOnly 'com.google.guava:guava:18.0' + runtimeOnly 'com.google.guava:guava:23.0' // Other dependencies api 'org.tukaani:xz:1.9' api 'commons-io:commons-io:2.11.0' @@ -119,11 +119,21 @@ forbiddenPatterns { thirdPartyAudit { ignoreMissingClasses() ignoreViolations( + // uses internal java api: sun.misc.Unsafe 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1' ) } diff --git a/plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 b/plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 deleted file mode 100644 index 87f7acb8158ec..0000000000000 --- a/plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cce0823396aa693798f8882e64213b1772032b09 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 b/plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 new file mode 100644 index 0000000000000..197134628d939 --- /dev/null +++ b/plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 @@ -0,0 +1 @@ +c947004bb13d18182be60077ade044099e4f26f1 \ No newline at end of file From 81a77aaae66fc993d52c461164ebcefcd6fd2124 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 18:15:36 -0700 Subject: [PATCH 23/34] Added bwc version 2.0.1 (#3452) Signed-off-by: Kunal Kotwani Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 245c112356178..0461af4966e92 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -41,4 +41,5 @@ BWC_VERSION: - "1.3.2" - "1.3.3" - "2.0.0" + - "2.0.1" - "2.1.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index a69c1f3c3bcb1..04907ee5d054b 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -88,6 +88,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; From 60c8ca9a566ce5ab839fc3ac70528c5c83b6eae5 Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Thu, 9 Jun 2022 19:00:29 +0000 Subject: [PATCH 24/34] Add release notes for 1.3.3 (#3549) Signed-off-by: Xue Zhou --- release-notes/opensearch.release-notes-1.3.3.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.3.md diff --git a/release-notes/opensearch.release-notes-1.3.3.md b/release-notes/opensearch.release-notes-1.3.3.md new file mode 100644 index 0000000000000..fd80e526166f0 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.3.md @@ -0,0 +1,10 @@ +## Version 1.3.3 Release Notes + +### Upgrades +* Upgrade google-oauth-client to 1.33.3 ([#3502](https://github.com/opensearch-project/OpenSearch/pull/3502)) +* Upgrade log4j-core to 2.17.1 ([#3508](https://github.com/opensearch-project/OpenSearch/pull/3508)) +* Upgrade jdom2 to 2.0.6.1 ([#3509](https://github.com/opensearch-project/OpenSearch/pull/3509)) + +### Bug Fixes +* Fixing org.opensearch.monitor.os.OsProbeTests::testLogWarnCpuMessageOnlyOnes when CGroups are not available ([#2101](https://github.com/opensearch-project/OpenSearch/pull/2101)) +* Fixing org.opensearch.monitor.os.OsProbeTests > testLogWarnCpuMessageOnlyOnes when cgroups are available but cgroup stats is not ([#3448](https://github.com/opensearch-project/OpenSearch/pull/3448)) From fc541544bef481f307623ef9f9b011b18c88b779 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 9 Jun 2022 16:39:00 -0500 Subject: [PATCH 25/34] [Upgrade] Lucene-9.3.0-snapshot-b7231bb (#3537) Upgrades to latest snapshot of lucene 9.3; including reducing maxFullFlushMergeWaitMillis in LuceneTest.testWrapLiveDocsNotExposeAbortedDocuments to 0 ms to ensure aborted docs are not merged away in the test with the new mergeOnRefresh default policy. 
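For illustration, a minimal standalone sketch of the override this patch applies to LuceneTests (further below), assuming a Lucene 9.x classpath; the class name, directory, merge policy, and document here are placeholders and not part of the patch:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;
    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;

    public class FullFlushMergeWaitSketch {
        public static void main(String[] args) throws Exception {
            // With the merge-on-refresh default policy, a full flush (e.g. on getReader/refresh)
            // may wait up to maxFullFlushMergeWaitMillis (500 ms by default, see LUCENE-10078)
            // and merge away segments holding aborted documents before a test can observe them.
            IndexWriterConfig config = new IndexWriterConfig().setMergePolicy(new TieredMergePolicy());
            config.setMaxFullFlushMergeWaitMillis(0); // disable the wait, as the test change below does
            try (Directory dir = new ByteBuffersDirectory(); IndexWriter writer = new IndexWriter(dir, config)) {
                writer.addDocument(new Document());
                writer.flush();
            }
        }
    }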
Signed-off-by: Nicholas Walter Knize --- buildSrc/version.properties | 2 +- .../lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../test/java/org/opensearch/common/lucene/LuceneTests.java | 3 +++ 46 files changed, 26 insertions(+), 23 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 
plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 
server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fe2cfe6a63ee6..87dbad73229b4 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.3.0-snapshot-823df23 +lucene = 9.3.0-snapshot-b7231bb bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 540a48bf7415f..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -610ec9bb8001a2d2ea88e3384eb516017504139e \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..f527a3b68b6a3 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +57ae445a0050ad492ef494b692b486dfe718b564 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 7bc128d4562fa..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43f2ea45a2d12b4c75c7ac11b85ec736c73bc07f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..51cbf51d90626 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +b10e5bdae6df879b770060e0006bbc1c780c886d \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index bad2a0bdcfa2a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb46807684a5b0e28a02b2a1ea3d528e4c25aa05 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..ff57bbc283385 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +f0ddc3072fd16012dafc74928f87fdfd7669ea4a \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index b2c62bcbbade1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be94b15085b6390ed64a8e8a4f5afbcb2d4d5181 \ No newline at end of file diff --git 
a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..13dd3c8a8bb24 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d761fa983d9c21099c433731d5519651737750c1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index c7f8fd797c589..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6f705a7df2007f5583215420da0725f844ac4f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..5cba6f6700769 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +43abbbe7c3c789ac448f898981acf54e487407a6 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 28424c2dd1c7a..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea9931a34288fa6cbd894e244a101e86926ebfb8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..62097dc39ae20 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +55df9442a35fe09d4f3f98bd2dda4d1a1dbfd996 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index d7c4b20a29db2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c339ce0a3b02d92a804081f5ff44b99f7a468caf \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..1666e4aae21a6 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +102cbb1d619b96e1f3e524520658b9327a93aba1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index b4a9090408165..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8faa5faa38ab8f545e12cf3dd914e934a2f2bfe \ No 
newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..3a2d3cec6b952 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +0d5dc4dfb74d698e51dc9b95268faf6dde4b0815 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index ab4abfd7d6a49..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dbb5828e79780989a8758b7cbb5a1aacac0004f \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..4cb292ad20c1f --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +6c6a9569777e4f01c90ed840e5a04234dfcaf42e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 8ff6a25c9547e..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68ebd183f1e9edde9f2f37c60f784e4f03555eec \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..3878ed346c9ce --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +a7ef963f9f9f15fc5018c5fa68bae5cf65692ca9 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 2ec15eb0012c5..0000000000000 --- a/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea3cb640597d93168765174207542c6765c1fe15 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..9f9f6be85c57c --- /dev/null +++ b/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +da113c963d62f0c8786d7c294dbbb63d5d7953ab \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 7b6c561ddeedf..0000000000000 --- a/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab2bcdbade5976e127c7e9393bf7a7e25a957d9a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..92d0c41c6f4d2 --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +54f65917cfa6c9c54cd0354ba333aa7e0f2980e5 \ No newline at end of file diff --git 
a/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index b2aa53fcdfb83..0000000000000 --- a/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -31ce6ff9188dea49dc4b4d082b498332cc7b86e7 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..ecab2abeb6220 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d73ebe32147c9a12d321c0b1273d5e5d797b705f \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 7918597d46763..0000000000000 --- a/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c387884f0bc00fb1c064754a69e1e81dff12c755 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..725fc883c272b --- /dev/null +++ b/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +797c92ffe35af37ab1783906fb93ed95a145a701 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index a87d3de9e2310..0000000000000 --- a/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e278a2cfe1500b76da770aa29ecd487fea5f8dc3 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..312a65edb6e24 --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +5714d64c39021c65dece8ee979d9ea39a327bb87 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 18a165097d2be..0000000000000 --- a/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77933cdffbcd0f56888a50fd1d9fb39cf6148f1a \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..b701384ab601d --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +4d401c55114367e574ed51e914661f0a97f91e88 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 4d148f3a840c8..0000000000000 --- a/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d521efa3a111e2feab1a7f07a0cc944bbdcddf4 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..ec2f7508d35cc --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 
+1 @@ +0f165ff86546565d32a508c82ca80ac2840bcf38 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index c6e913767696a..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30d6f8f757a007248804ed5db624a125ada24154 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..40a125ccada21 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d6fb5af1873628dc026e18b5438042143a9a9824 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 22b7769ee3b4d..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dd68761fade2dc4d2ea0d9d476a5172cfd22cd2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..b4784be40d072 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +c48ab8982e6bf9429eded6a06d640db922eb2b69 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 22d9211a3b623..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -044ac03b461aaae4568f64948f783e87dae85a8b \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..6f39582081758 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d757dc379fee639f54d0574443c5a6fd0b70613a \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 66998393ed970..0000000000000 --- a/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53a02ec5b0eabe7fdf97fea1b19eeca5a6cf1122 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..b5986970cb4da --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +6a4e6de9b40cd027233a3ed00774810c36457a6c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index e5aca63b21732..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a57b91ee1c6f3f666dcac697ce6a7de9bd5abba7 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 
b/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..682a0ee88868f --- /dev/null +++ b/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +e793761c4a4292de0d52f066787ab5f3133382cd \ No newline at end of file diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java index 4c179309f16ba..776b44d346fb5 100644 --- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java @@ -591,6 +591,9 @@ public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { Directory dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + // override 500ms default introduced in + // https://issues.apache.org/jira/browse/LUCENE-10078 + config.setMaxFullFlushMergeWaitMillis(0); IndexWriter writer = new IndexWriter(dir, config); int numDocs = between(1, 10); List liveDocs = new ArrayList<>(); From fb1375987643f7c99ae77442401caffca5dddc25 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 10 Jun 2022 11:02:53 +0530 Subject: [PATCH 26/34] [Remote Store] Upload segments to remote store post refresh (#3460) * Add RemoteDirectory interface to copy segment files to/from remote store Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale * Add index level setting for remote store Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale * Add RemoteDirectoryFactory and use RemoteDirectory instance in RefreshListener Co-authored-by: Sachin Kale Signed-off-by: Sachin Kale * Upload segment to remote store post refresh Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../cluster/metadata/IndexMetadata.java | 11 + .../common/settings/IndexScopedSettings.java | 4 +- .../opensearch/common/util/FeatureFlags.java | 6 + .../org/opensearch/index/IndexModule.java | 4 + .../org/opensearch/index/IndexService.java | 28 ++- .../org/opensearch/index/IndexSettings.java | 9 + .../opensearch/index/shard/IndexShard.java | 16 +- .../shard/RemoteStoreRefreshListener.java | 87 ++++++++ .../index/store/RemoteDirectory.java | 193 ++++++++++++++++++ .../index/store/RemoteDirectoryFactory.java | 37 ++++ .../index/store/RemoteIndexInput.java | 85 ++++++++ .../index/store/RemoteIndexOutput.java | 99 +++++++++ .../opensearch/indices/IndicesService.java | 8 +- .../opensearch/plugins/IndexStorePlugin.java | 17 ++ .../common/util/FeatureFlagTests.java | 7 + .../opensearch/index/IndexSettingsTests.java | 39 ++++ .../RemoteStoreRefreshListenerTests.java | 139 +++++++++++++ .../store/RemoteDirectoryFactoryTests.java | 65 ++++++ .../index/store/RemoteDirectoryTests.java | 158 ++++++++++++++ .../index/store/RemoteIndexInputTests.java | 99 +++++++++ .../index/store/RemoteIndexOutputTests.java | 68 ++++++ ...dicesLifecycleListenerSingleNodeTests.java | 3 +- .../index/shard/IndexShardTestCase.java | 3 +- 24 files changed, 1176 insertions(+), 12 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteDirectory.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java create mode 100644 
server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java create mode 100644 server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 888881d43eb11..2bf73b34247b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -675,7 +675,8 @@ public static final IndexShard newIndexShard( () -> {}, RetentionLeaseSyncer.EMPTY, cbs, - SegmentReplicationCheckpointPublisher.EMPTY + SegmentReplicationCheckpointPublisher.EMPTY, + null ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index ec70e642ababc..442137fb70e1f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -283,6 +283,17 @@ public Iterator> settings() { Property.Final ); + public static final String SETTING_REMOTE_STORE = "index.remote_store"; + /** + * Used to specify if the index data should be persisted in the remote store. 
+ */ + public static final Setting INDEX_REMOTE_STORE_SETTING = Setting.boolSetting( + SETTING_REMOTE_STORE, + false, + Property.IndexScope, + Property.Final + ); + public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index ba2666b53d7a8..75d7081e7729a 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -217,7 +217,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings { */ public static final Map FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - IndexMetadata.INDEX_REPLICATION_TYPE_SETTING + IndexMetadata.INDEX_REPLICATION_TYPE_SETTING, + FeatureFlags.REMOTE_STORE, + IndexMetadata.INDEX_REMOTE_STORE_SETTING ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 0b31e3814667a..fa39dc9ac5aa0 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -23,6 +23,12 @@ public class FeatureFlags { */ public static final String REPLICATION_TYPE = "opensearch.experimental.feature.replication_type.enabled"; + /** + * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. + * Once the feature is ready for production release, this feature flag can be removed. + */ + public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; + /** * Used to test feature flags whose values are expected to be booleans. 
* This method returns true if the value is "true" (case-insensitive), diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 49daf8293656c..2cea0e4e3e95c 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,6 +70,7 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; +import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -118,6 +119,8 @@ public final class IndexModule { private static final FsDirectoryFactory DEFAULT_DIRECTORY_FACTORY = new FsDirectoryFactory(); + private static final RemoteDirectoryFactory REMOTE_DIRECTORY_FACTORY = new RemoteDirectoryFactory(); + private static final IndexStorePlugin.RecoveryStateFactory DEFAULT_RECOVERY_STATE_FACTORY = RecoveryState::new; public static final Setting INDEX_STORE_TYPE_SETTING = new Setting<>( @@ -516,6 +519,7 @@ public IndexService newIndexService( client, queryCache, directoryFactory, + REMOTE_DIRECTORY_FACTORY, eventListener, readerWrapperFactory, mapperRegistry, diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 0a6d1501f2bea..f699278919d6b 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -81,6 +81,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.IndexingOperationListener; +import org.opensearch.index.shard.RemoteStoreRefreshListener; import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; @@ -96,6 +97,9 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -136,6 +140,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; private final IndexStorePlugin.DirectoryFactory directoryFactory; + private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; private final IndexStorePlugin.RecoveryStateFactory recoveryStateFactory; private final CheckedFunction readerWrapper; private final IndexCache indexCache; @@ -190,6 +195,7 @@ public IndexService( Client client, QueryCache queryCache, IndexStorePlugin.DirectoryFactory directoryFactory, + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, IndexEventListener eventListener, Function> wrapperFactory, MapperRegistry mapperRegistry, @@ -260,6 +266,7 @@ public IndexService( this.eventListener = eventListener; this.nodeEnv = nodeEnv; 
this.directoryFactory = directoryFactory; + this.remoteDirectoryFactory = remoteDirectoryFactory; this.recoveryStateFactory = recoveryStateFactory; this.engineFactory = Objects.requireNonNull(engineFactory); this.engineConfigFactory = Objects.requireNonNull(engineConfigFactory); @@ -430,7 +437,8 @@ public synchronized IndexShard createShard( final ShardRouting routing, final Consumer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final SegmentReplicationCheckpointPublisher checkpointPublisher + final SegmentReplicationCheckpointPublisher checkpointPublisher, + final RepositoriesService repositoriesService ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -504,6 +512,21 @@ public synchronized IndexShard createShard( } }; Directory directory = directoryFactory.newDirectory(this.indexSettings, path); + Directory remoteDirectory = null; + RemoteStoreRefreshListener remoteStoreRefreshListener = null; + if (this.indexSettings.isRemoteStoreEnabled()) { + try { + Repository repository = repositoriesService.repository(clusterService.state().metadata().clusterUUID()); + remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path, repository); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(directory, remoteDirectory); + } catch (RepositoryMissingException e) { + throw new IllegalArgumentException( + "Repository should be created before creating index with remote_store enabled setting", + e + ); + } + } + store = new Store( shardId, this.indexSettings, @@ -533,7 +556,8 @@ public synchronized IndexShard createShard( () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, circuitBreakerService, - this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null + this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null, + remoteStoreRefreshListener ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index e40acb94ee498..ed3f6002be073 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -534,6 +534,7 @@ public final class IndexSettings { private final Settings nodeSettings; private final int numberOfShards; private final ReplicationType replicationType; + private final boolean isRemoteStoreEnabled; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -686,6 +687,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE, false); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -927,6 +929,13 @@ public boolean isSegRepEnabled() { return ReplicationType.SEGMENT.equals(replicationType); } + /** + * Returns if remote store is enabled for this index. 
+ */ + public boolean isRemoteStoreEnabled() { + return isRemoteStoreEnabled; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5d11c34ca205c..bad412003df26 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -304,6 +304,8 @@ Runnable getGlobalCheckpointSyncer() { private volatile boolean useRetentionLeasesInPeerRecovery; private final ReferenceManager.RefreshListener checkpointRefreshListener; + private final RemoteStoreRefreshListener remoteStoreRefreshListener; + public IndexShard( final ShardRouting shardRouting, final IndexSettings indexSettings, @@ -325,7 +327,8 @@ public IndexShard( final Runnable globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService, - @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher + @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, + @Nullable final RemoteStoreRefreshListener remoteStoreRefreshListener ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -413,6 +416,7 @@ public boolean shouldCache(Query query) { } else { this.checkpointRefreshListener = null; } + this.remoteStoreRefreshListener = remoteStoreRefreshListener; } public ThreadPool getThreadPool() { @@ -3139,11 +3143,13 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { } }; - final List internalRefreshListener; + final List internalRefreshListener = new ArrayList<>(); + internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); + if (remoteStoreRefreshListener != null && shardRouting.primary()) { + internalRefreshListener.add(remoteStoreRefreshListener); + } if (this.checkpointRefreshListener != null) { - internalRefreshListener = Arrays.asList(new RefreshMetricUpdater(refreshMetric), checkpointRefreshListener); - } else { - internalRefreshListener = Collections.singletonList(new RefreshMetricUpdater(refreshMetric)); + internalRefreshListener.add(checkpointRefreshListener); } return this.engineConfigFactory.newEngineConfig( diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java new file mode 100644 index 0000000000000..4b549ec485c0e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +/** + * RefreshListener implementation to upload newly created segment files to the remote store + */ +public class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { + + private final Directory storeDirectory; + private final Directory remoteDirectory; + // ToDo: This can be a map with metadata of the uploaded file as value of the map (GitHub #3398) + private final Set filesUploadedToRemoteStore; + private static final Logger logger = LogManager.getLogger(RemoteStoreRefreshListener.class); + + public RemoteStoreRefreshListener(Directory storeDirectory, Directory remoteDirectory) throws IOException { + this.storeDirectory = storeDirectory; + this.remoteDirectory = remoteDirectory; + // ToDo: Handle failures in reading list of files (GitHub #3397) + this.filesUploadedToRemoteStore = new HashSet<>(Arrays.asList(remoteDirectory.listAll())); + } + + @Override + public void beforeRefresh() throws IOException { + // Do Nothing + } + + /** + * Upload new segment files created as part of the last refresh to the remote segment store. + * The method also deletes segment files from remote store which are not part of local filesystem. + * @param didRefresh true if the refresh opened a new reference + * @throws IOException in case of I/O error in reading list of local files + */ + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if (didRefresh) { + Set localFiles = Set.of(storeDirectory.listAll()); + localFiles.stream().filter(file -> !filesUploadedToRemoteStore.contains(file)).forEach(file -> { + try { + remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + filesUploadedToRemoteStore.add(file); + } catch (NoSuchFileException e) { + logger.info( + () -> new ParameterizedMessage("The file {} does not exist anymore. 
It can happen in case of temp files", file), + e + ); + } catch (IOException e) { + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); + } + }); + + Set remoteFilesToBeDeleted = new HashSet<>(); + // ToDo: Instead of deleting files in sync, mark them and delete in async/periodic flow (GitHub #3142) + filesUploadedToRemoteStore.stream().filter(file -> !localFiles.contains(file)).forEach(file -> { + try { + remoteDirectory.deleteFile(file); + remoteFilesToBeDeleted.add(file); + } catch (IOException e) { + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while deleting file {} from the remote segment store", file), e); + } + }); + + remoteFilesToBeDeleted.forEach(filesUploadedToRemoteStore::remove); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java new file mode 100644 index 0000000000000..2f8f977537327 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -0,0 +1,193 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +/** + * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. + * A remoteDirectory contains only files (no sub-folder hierarchy). This class does not support all the methods in + * the Directory interface. Currently, it contains implementation of methods which are used to copy files to/from + * the remote store. Implementation of remaining methods will be added as remote store is integrated with + * replication, peer recovery etc. + * + * @opensearch.internal + */ +public final class RemoteDirectory extends Directory { + + private final BlobContainer blobContainer; + + public RemoteDirectory(BlobContainer blobContainer) { + this.blobContainer = blobContainer; + } + + /** + * Returns names of all files stored in this directory. The output must be in sorted (UTF-16, + * java's {@link String#compareTo}) order. + */ + @Override + public String[] listAll() throws IOException { + return blobContainer.listBlobs().keySet().stream().sorted().toArray(String[]::new); + } + + /** + * Removes an existing file in the directory. + * + *

This method will not throw an exception when the file doesn't exist and simply ignores this case. + * This is a deviation from the {@code Directory} interface where it is expected to throw either + * {@link NoSuchFileException} or {@link FileNotFoundException} if {@code name} points to a non-existing file. + * + * @param name the name of an existing file. + * @throws IOException if the file exists but could not be deleted. + */ + @Override + public void deleteFile(String name) throws IOException { + // ToDo: Add a check for file existence + blobContainer.deleteBlobsIgnoringIfNotExists(Collections.singletonList(name)); + } + + /** + * Creates and returns a new instance of {@link RemoteIndexOutput} which will be used to copy files to the remote + * store. + * + *

In the {@link Directory} interface, it is expected to throw {@link java.nio.file.FileAlreadyExistsException} + * if the file already exists in the remote store. As this method does not open a file, it does not throw the + * exception. + * + * @param name the name of the file to copy to remote store. + */ + @Override + public IndexOutput createOutput(String name, IOContext context) { + return new RemoteIndexOutput(name, blobContainer); + } + + /** + * Opens a stream for reading an existing file and returns {@link RemoteIndexInput} enclosing the stream. + * + * @param name the name of an existing file. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist + */ + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + return new RemoteIndexInput(name, blobContainer.readBlob(name), fileLength(name)); + } + + /** + * Closes the directory by deleting all the files in this directory + */ + @Override + public void close() throws IOException { + blobContainer.delete(); + } + + /** + * Returns the byte length of a file in the directory. + * + * @param name the name of an existing file. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist + */ + @Override + public long fileLength(String name) throws IOException { + // ToDo: Instead of calling remote store each time, keep a cache with segment metadata + Map metadata = blobContainer.listBlobsByPrefix(name); + if (metadata.containsKey(name)) { + return metadata.get(name).length(); + } + throw new NoSuchFileException(name); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Once soft deleting is supported segment files in the remote store, this method will provide details of + * number of files marked as deleted but not actually deleted from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public Set getPendingDeletions() throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Temporary IndexOutput is not required while working with Remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Segment upload to the remote store will be permanent and does not require a separate sync API. + * This may change in the future if segment upload to remote store happens via cache and we need sync API to write + * the cache contents to the store permanently. + * + * @throws UnsupportedOperationException always + */ + @Override + public void sync(Collection names) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Once metadata to be stored with each shard is finalized, syncMetaData method will be used to sync the directory + * metadata to the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public void syncMetaData() { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. 
+ * As this method is used by IndexWriter to publish commits, the implementation of this method is required when + * IndexWriter is backed by RemoteDirectory. + * + * @throws UnsupportedOperationException always + */ + @Override + public void rename(String source, String dest) throws IOException { + throw new UnsupportedOperationException(); + + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Once locking segment files in remote store is supported, implementation of this method is required with + * remote store specific LockFactory. + * + * @throws UnsupportedOperationException always + */ + @Override + public Lock obtainLock(String name) throws IOException { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java new file mode 100644 index 0000000000000..eb7912a1f4a2b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.io.IOException; + +/** + * Factory for a remote store directory + * + * @opensearch.internal + */ +public class RemoteDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { + + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath path, Repository repository) throws IOException { + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + BlobPath blobPath = new BlobPath(); + blobPath = blobPath.add(indexSettings.getIndex().getName()).add(String.valueOf(path.getShardId().getId())); + BlobContainer blobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(blobPath); + return new RemoteDirectory(blobContainer); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java new file mode 100644 index 0000000000000..24e1128dec1b5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.IndexInput; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Class for input from a file in a {@link RemoteDirectory}. Used for all input operations from the remote store. + * Currently, only methods from {@link IndexInput} that are required for reading a file from remote store are + * implemented. Remaining methods will be implemented as we open up remote store for other use cases like replication, + * peer recovery etc. 
+ * ToDo: Extend ChecksumIndexInput + * @see RemoteDirectory + * + * @opensearch.internal + */ +public class RemoteIndexInput extends IndexInput { + + private final InputStream inputStream; + private final long size; + + public RemoteIndexInput(String name, InputStream inputStream, long size) { + super(name); + this.inputStream = inputStream; + this.size = size; + } + + @Override + public byte readByte() throws IOException { + byte[] buffer = new byte[1]; + inputStream.read(buffer); + return buffer[0]; + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + inputStream.read(b, offset, len); + } + + @Override + public void close() throws IOException { + inputStream.close(); + } + + @Override + public long length() { + return size; + } + + @Override + public void seek(long pos) throws IOException { + inputStream.skip(pos); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public long getFilePointer() { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java new file mode 100644 index 0000000000000..2af65452a6eac --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.lucene.store.InputStreamIndexInput; + +import java.io.IOException; + +/** + * Class for output to a file in a {@link RemoteDirectory}. Used for all output operations to the remote store. + * Currently, only methods from {@link IndexOutput} that are required for uploading a segment file to remote store are + * implemented. Remaining methods will be implemented as we open up remote store for other use cases like replication, + * peer recovery etc. + * ToDo: Extend ChecksumIndexInput + * @see RemoteDirectory + * + * @opensearch.internal + */ +public class RemoteIndexOutput extends IndexOutput { + + private final BlobContainer blobContainer; + + public RemoteIndexOutput(String name, BlobContainer blobContainer) { + super(name, name); + this.blobContainer = blobContainer; + } + + @Override + public void copyBytes(DataInput input, long numBytes) throws IOException { + assert input instanceof IndexInput : "input should be instance of IndexInput"; + blobContainer.writeBlob(getName(), new InputStreamIndexInput((IndexInput) input, numBytes), numBytes, false); + } + + /** + * This is a no-op. 
Once segment file upload to the remote store is complete, we don't need to explicitly close + * the stream. It is taken care by internal APIs of client of the remote store. + */ + @Override + public void close() throws IOException { + // do nothing + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeByte(byte b) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeBytes(byte[] byteArray, int offset, int length) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public long getFilePointer() { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not directly used for the file transfer to/from the remote store. + * But the checksum is important to verify integrity of the data and that means implementing this method will + * be required for the segment upload as well. + * + * @throws UnsupportedOperationException always + */ + @Override + public long getChecksum() throws IOException { + throw new UnsupportedOperationException(); + } + +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 79fd2893fb78c..b2f6f10c19638 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -859,7 +859,13 @@ public IndexShard createShard( IndexService indexService = indexService(shardRouting.index()); assert indexService != null; RecoveryState recoveryState = indexService.createRecoveryState(shardRouting, targetNode, sourceNode); - IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher); + IndexShard indexShard = indexService.createShard( + shardRouting, + globalCheckpointSyncer, + retentionLeaseSyncer, + checkpointPublisher, + repositoriesService + ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS diff --git a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java index 2f549fec54759..52ddf6dcf2753 100644 --- a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java @@ -39,6 +39,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.repositories.Repository; 
import java.io.IOException; import java.util.Collections; @@ -66,6 +67,22 @@ interface DirectoryFactory { Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException; } + /** + * An interface that describes how to create a new remote directory instance per shard. + */ + @FunctionalInterface + interface RemoteDirectoryFactory { + /** + * Creates a new remote directory per shard. This method is called once per shard on shard creation. + * @param indexSettings the shards index settings + * @param shardPath the path the shard is using + * @param repository to get the BlobContainer details + * @return a new RemoteDirectory instance + * @throws IOException if an IOException occurs while opening the directory + */ + Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath, Repository repository) throws IOException; + } + /** * The {@link DirectoryFactory} mappings for this plugin. When an index is created the store type setting * {@link org.opensearch.index.IndexModule#INDEX_STORE_TYPE_SETTING} on the index will be examined and either use the default or a diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index 1084f9c658db4..a4f2b242564e2 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -21,6 +21,7 @@ public class FeatureFlagTests extends OpenSearchTestCase { @BeforeClass public static void enableFeature() { AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true")); + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REMOTE_STORE, "true")); } public void testReplicationTypeFeatureFlag() { @@ -40,4 +41,10 @@ public void testNonBooleanFeatureFlag() { assertNotNull(System.getProperty(javaVersionProperty)); assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); } + + public void testRemoteStoreFeatureFlag() { + String remoteStoreFlag = FeatureFlags.REMOTE_STORE; + assertNotNull(System.getProperty(remoteStoreFlag)); + assertTrue(FeatureFlags.isEnabled(remoteStoreFlag)); + } } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 71433673eef5a..4b3dc041b9f54 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.translog.Translog; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -56,6 +57,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; +import static org.opensearch.common.settings.IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS; public class IndexSettingsTests extends OpenSearchTestCase { @@ -753,4 +755,41 @@ public void testIgnoreTranslogRetentionSettingsIfSoftDeletesEnabled() { assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); } + + public void 
testRemoteStoreDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertFalse(settings.isRemoteStoreEnabled()); + } + + public void testRemoteStoreExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_STORE, true) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertTrue(settings.isRemoteStoreEnabled()); + } + + public void testUpdateRemoteStoreFails() { + Set> remoteStoreSettingSet = new HashSet<>(); + remoteStoreSettingSet.add(FEATURE_FLAGGED_INDEX_SETTINGS.get(FeatureFlags.REMOTE_STORE)); + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> settings.updateSettings( + Settings.builder().put("index.remote_store", randomBoolean()).build(), + Settings.builder(), + Settings.builder(), + "index" + ) + ); + assertEquals(error.getMessage(), "final index setting [index.remote_store], not updateable"); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java new file mode 100644 index 0000000000000..af92d821a9043 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.doThrow; + +public class RemoteStoreRefreshListenerTests extends OpenSearchTestCase { + private Directory storeDirectory; + private Directory remoteDirectory; + + private RemoteStoreRefreshListener remoteStoreRefreshListener; + + public void setup(String[] remoteFiles) throws IOException { + storeDirectory = mock(Directory.class); + remoteDirectory = mock(Directory.class); + when(remoteDirectory.listAll()).thenReturn(remoteFiles); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(storeDirectory, remoteDirectory); + } + + public void testAfterRefreshFalse() throws IOException { + setup(new String[0]); + remoteStoreRefreshListener.afterRefresh(false); + verify(storeDirectory, times(0)).listAll(); + } + + public void testAfterRefreshTrueNoLocalFiles() throws IOException { + setup(new String[0]); + + when(storeDirectory.listAll()).thenReturn(new String[0]); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); + verify(remoteDirectory, times(0)).deleteFile(any()); + } + + public void testAfterRefreshOnlyUploadFiles() throws IOException { + setup(new String[0]); + + String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + verify(remoteDirectory, times(0)).deleteFile(any()); + } + + public void testAfterRefreshOnlyUploadAndDelete() throws IOException { + setup(new String[] { "0.si", "0.cfs" }); + + String[] localFiles = new String[] { "segments_1", "1.si", "1.cfs", "1.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.si", "1.si", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); + verify(remoteDirectory).deleteFile("0.si"); + verify(remoteDirectory).deleteFile("0.cfs"); + } + + public void testAfterRefreshOnlyDelete() throws IOException { + setup(new String[] { "0.si", "0.cfs" }); + + String[] localFiles = new String[] { "0.si" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); + 
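// "0.si" is still present locally, so only "0.cfs" is expected to be deleted from the remote store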
verify(remoteDirectory).deleteFile("0.cfs"); + } + + public void testAfterRefreshTempLocalFile() throws IOException { + setup(new String[0]); + + String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs.tmp" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + doThrow(new NoSuchFileException("0.cfs.tmp")).when(remoteDirectory) + .copyFrom(storeDirectory, "0.cfs.tmp", "0.cfs.tmp", IOContext.DEFAULT); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); + verify(remoteDirectory, times(0)).deleteFile(any()); + } + + public void testAfterRefreshConsecutive() throws IOException { + setup(new String[0]); + + String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + doThrow(new IOException("0.cfs")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfe", IOContext.DEFAULT); + doThrow(new IOException("0.cfe")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + verify(remoteDirectory, times(0)).deleteFile(any()); + + String[] localFilesSecondRefresh = new String[] { "segments_1", "0.cfs", "1.cfs", "1.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFilesSecondRefresh); + + remoteStoreRefreshListener.afterRefresh(true); + + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); + verify(remoteDirectory).deleteFile("0.si"); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java new file mode 100644 index 0000000000000..d781fad9ab99c --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collections; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; + +public class RemoteDirectoryFactoryTests extends OpenSearchTestCase { + + private RemoteDirectoryFactory remoteDirectoryFactory; + + @Before + public void setup() { + remoteDirectoryFactory = new RemoteDirectoryFactory(); + } + + public void testNewDirectory() throws IOException { + Settings settings = Settings.builder().build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); + Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0"); + ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0)); + BlobStoreRepository repository = mock(BlobStoreRepository.class); + BlobStore blobStore = mock(BlobStore.class); + BlobContainer blobContainer = mock(BlobContainer.class); + when(repository.blobStore()).thenReturn(blobStore); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); + + Directory directory = remoteDirectoryFactory.newDirectory(indexSettings, shardPath, repository); + assertTrue(directory instanceof RemoteDirectory); + ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); + verify(blobStore).blobContainer(blobPathCaptor.capture()); + BlobPath blobPath = blobPathCaptor.getValue(); + assertEquals("foo/0/", blobPath.buildAsString()); + + directory.listAll(); + verify(blobContainer).listBlobs(); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java new file mode 100644 index 0000000000000..c2c365d9140df --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -0,0 +1,158 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.junit.Before; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.NoSuchFileException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.mockito.Mockito.*; + +public class RemoteDirectoryTests extends OpenSearchTestCase { + private BlobContainer blobContainer; + + private RemoteDirectory remoteDirectory; + + @Before + public void setup() { + blobContainer = mock(BlobContainer.class); + remoteDirectory = new RemoteDirectory(blobContainer); + } + + public void testListAllEmpty() throws IOException { + when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); + + String[] actualFileNames = remoteDirectory.listAll(); + String[] expectedFileName = new String[] {}; + assertArrayEquals(expectedFileName, actualFileNames); + } + + public void testListAll() throws IOException { + Map fileNames = Stream.of("abc", "xyz", "pqr", "lmn", "jkl") + .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100))); + + when(blobContainer.listBlobs()).thenReturn(fileNames); + + String[] actualFileNames = remoteDirectory.listAll(); + String[] expectedFileName = new String[] { "abc", "jkl", "lmn", "pqr", "xyz" }; + assertArrayEquals(expectedFileName, actualFileNames); + } + + public void testListAllException() throws IOException { + when(blobContainer.listBlobs()).thenThrow(new IOException("Error reading blob store")); + + assertThrows(IOException.class, () -> remoteDirectory.listAll()); + } + + public void testDeleteFile() throws IOException { + remoteDirectory.deleteFile("segment_1"); + + verify(blobContainer).deleteBlobsIgnoringIfNotExists(Collections.singletonList("segment_1")); + } + + public void testDeleteFileException() throws IOException { + doThrow(new IOException("Error writing to blob store")).when(blobContainer) + .deleteBlobsIgnoringIfNotExists(Collections.singletonList("segment_1")); + + assertThrows(IOException.class, () -> remoteDirectory.deleteFile("segment_1")); + } + + public void testCreateOutput() { + IndexOutput indexOutput = remoteDirectory.createOutput("segment_1", IOContext.DEFAULT); + assertTrue(indexOutput instanceof RemoteIndexOutput); + assertEquals("segment_1", indexOutput.getName()); + } + + public void testOpenInput() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + Map fileInfo = new HashMap<>(); + fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); + when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + IndexInput indexInput = remoteDirectory.openInput("segment_1", IOContext.DEFAULT); + assertTrue(indexInput instanceof RemoteIndexInput); + assertEquals(100, indexInput.length()); + } + + public void testOpenInputIOException() throws IOException { + when(blobContainer.readBlob("segment_1")).thenThrow(new IOException("Error while reading")); + + assertThrows(IOException.class, () -> remoteDirectory.openInput("segment_1", IOContext.DEFAULT)); 
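+ // the IOException stubbed on readBlob reaches the caller before fileLength(name) is ever evaluated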
+ } + + public void testOpenInputNoSuchFileException() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + when(blobContainer.listBlobsByPrefix("segment_1")).thenThrow(new NoSuchFileException("segment_1")); + + assertThrows(NoSuchFileException.class, () -> remoteDirectory.openInput("segment_1", IOContext.DEFAULT)); + } + + public void testClose() throws IOException { + remoteDirectory.close(); + + verify(blobContainer).delete(); + } + + public void testCloseIOException() throws IOException { + when(blobContainer.delete()).thenThrow(new IOException("Error while writing to blob store")); + + assertThrows(IOException.class, () -> remoteDirectory.close()); + } + + public void testFileLength() throws IOException { + Map fileInfo = new HashMap<>(); + fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); + when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + assertEquals(100, remoteDirectory.fileLength("segment_1")); + } + + public void testFileLengthIOException() throws IOException { + when(blobContainer.listBlobsByPrefix("segment_1")).thenThrow(new NoSuchFileException("segment_1")); + + assertThrows(IOException.class, () -> remoteDirectory.fileLength("segment_1")); + } + + public void testGetPendingDeletions() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.getPendingDeletions()); + } + + public void testCreateTempOutput() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.createTempOutput("segment_1", "tmp", IOContext.DEFAULT)); + } + + public void testSync() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.sync(Collections.emptyList())); + } + + public void testRename() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.rename("segment_1", "segment_2")); + } + + public void testObtainLock() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.obtainLock("segment_1")); + } + +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java new file mode 100644 index 0000000000000..c2f81c035e424 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.junit.Before; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.io.InputStream; + +import static org.mockito.Mockito.*; + +public class RemoteIndexInputTests extends OpenSearchTestCase { + + private static final String FILENAME = "segment_1"; + private static final long FILESIZE = 200; + + private InputStream inputStream; + private RemoteIndexInput remoteIndexInput; + + @Before + public void setup() { + inputStream = mock(InputStream.class); + remoteIndexInput = new RemoteIndexInput(FILENAME, inputStream, FILESIZE); + } + + public void testReadByte() throws IOException { + InputStream inputStream = spy(InputStream.class); + remoteIndexInput = new RemoteIndexInput(FILENAME, inputStream, FILESIZE); + + when(inputStream.read()).thenReturn(10); + + assertEquals(10, remoteIndexInput.readByte()); + + verify(inputStream).read(any()); + } + + public void testReadByteIOException() throws IOException { + when(inputStream.read(any())).thenThrow(new IOException("Error reading")); + + assertThrows(IOException.class, () -> remoteIndexInput.readByte()); + } + + public void testReadBytes() throws IOException { + byte[] buffer = new byte[10]; + remoteIndexInput.readBytes(buffer, 10, 20); + + verify(inputStream).read(buffer, 10, 20); + } + + public void testReadBytesIOException() throws IOException { + byte[] buffer = new byte[10]; + when(inputStream.read(buffer, 10, 20)).thenThrow(new IOException("Error reading")); + + assertThrows(IOException.class, () -> remoteIndexInput.readBytes(buffer, 10, 20)); + } + + public void testClose() throws IOException { + remoteIndexInput.close(); + + verify(inputStream).close(); + } + + public void testCloseIOException() throws IOException { + doThrow(new IOException("Error closing")).when(inputStream).close(); + + assertThrows(IOException.class, () -> remoteIndexInput.close()); + } + + public void testLength() { + assertEquals(FILESIZE, remoteIndexInput.length()); + } + + public void testSeek() throws IOException { + remoteIndexInput.seek(10); + + verify(inputStream).skip(10); + } + + public void testSeekIOException() throws IOException { + when(inputStream.skip(10)).thenThrow(new IOException("Error reading")); + + assertThrows(IOException.class, () -> remoteIndexInput.seek(10)); + } + + public void testGetFilePointer() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.getFilePointer()); + } + + public void testSlice() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.slice("Slice middle", 50, 100)); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java new file mode 100644 index 0000000000000..64975f2ac4892 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.IndexInput; +import org.junit.Before; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.lucene.store.InputStreamIndexInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; + +public class RemoteIndexOutputTests extends OpenSearchTestCase { + private static final String FILENAME = "segment_1"; + + private BlobContainer blobContainer; + + private RemoteIndexOutput remoteIndexOutput; + + @Before + public void setup() { + blobContainer = mock(BlobContainer.class); + remoteIndexOutput = new RemoteIndexOutput(FILENAME, blobContainer); + } + + public void testCopyBytes() throws IOException { + IndexInput indexInput = mock(IndexInput.class); + remoteIndexOutput.copyBytes(indexInput, 100); + + verify(blobContainer).writeBlob(eq(FILENAME), any(InputStreamIndexInput.class), eq(100L), eq(false)); + } + + public void testCopyBytesIOException() throws IOException { + doThrow(new IOException("Error writing")).when(blobContainer) + .writeBlob(eq(FILENAME), any(InputStreamIndexInput.class), eq(100L), eq(false)); + + IndexInput indexInput = mock(IndexInput.class); + assertThrows(IOException.class, () -> remoteIndexOutput.copyBytes(indexInput, 100)); + } + + public void testWriteByte() { + byte b = 10; + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.writeByte(b)); + } + + public void testWriteBytes() { + byte[] buffer = new byte[10]; + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.writeBytes(buffer, 50, 60)); + } + + public void testGetFilePointer() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.getFilePointer()); + } + + public void testGetChecksum() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.getChecksum()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 0989bf869f18e..213a22539971f 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -153,7 +153,8 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting, s -> {}, RetentionLeaseSyncer.EMPTY, - SegmentReplicationCheckpointPublisher.EMPTY + SegmentReplicationCheckpointPublisher.EMPTY, + null ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 371fa6d102304..62c52ab636255 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -525,7 +525,8 @@ protected IndexShard newShard( globalCheckpointSyncer, retentionLeaseSyncer, breakerService, - checkpointPublisher + checkpointPublisher, + null ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); success = true; From 92b3cbb0e5827b1eebf0d61f7eaf2211e9be22ab Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 13 Jun 2022 
10:55:09 -0400 Subject: [PATCH 27/34] Fixing VerifyVersionConstantsIT test failure (#3574) Signed-off-by: Andriy Redko --- server/src/main/java/org/opensearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 04907ee5d054b..ec33e674c4d5f 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -89,7 +89,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; From a543224c4d0af654dc8b011b5512e2561eab8d97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 21:27:01 -0700 Subject: [PATCH 28/34] Bump jettison from 1.4.1 to 1.5.0 in /plugins/discovery-azure-classic (#3571) * Bump jettison from 1.4.1 to 1.5.0 in /plugins/discovery-azure-classic Bumps [jettison](https://github.com/jettison-json/jettison) from 1.4.1 to 1.5.0. - [Release notes](https://github.com/jettison-json/jettison/releases) - [Commits](https://github.com/jettison-json/jettison/compare/jettison-1.4.1...jettison-1.5.0) --- updated-dependencies: - dependency-name: org.codehaus.jettison:jettison dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-azure-classic/build.gradle | 2 +- .../discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 | 1 - .../discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 575b8858b16ba..5755ff55bfff9 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.4.1' + api 'org.codehaus.jettison:jettison:1.5.0' api 'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 deleted file mode 100644 index 815d87d917f2e..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d16bbcbac93446942c9e5da04530159afbe3e65 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 new file mode 100644 index 0000000000000..ec93f83474541 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 @@ -0,0 +1 @@ +933c7df7a4b78c9a9322f431014ea699b1fc0cc0 \ No newline at end of file From e5ad24044505627cb4920a9ed861214c59a9ebb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 21:39:23 -0700 Subject: [PATCH 29/34] Bump google-api-services-storage from v1-rev20200814-1.30.10 to v1-rev20220608-1.32.1 in /plugins/repository-gcs (#3573) * Bump google-api-services-storage in /plugins/repository-gcs Bumps google-api-services-storage from v1-rev20200814-1.30.10 to v1-rev20220608-1.32.1. --- updated-dependencies: - dependency-name: com.google.apis:google-api-services-storage dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Upgrade Google HTTP Client to 1.42.0 Signed-off-by: Xue Zhou Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Xue Zhou --- plugins/repository-gcs/build.gradle | 4 ++-- ...oogle-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 | 1 - ...google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 | 1 + .../licenses/google-http-client-1.35.0.jar.sha1 | 1 - .../licenses/google-http-client-1.42.0.jar.sha1 | 1 + 5 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 0e1c2125f5d81..097e96fcd8fdc 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -58,7 +58,7 @@ dependencies { api 'com.google.cloud:google-cloud-core:2.5.10' runtimeOnly 'com.google.guava:guava:30.1.1-jre' api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.35.0' + api 'com.google.http-client:google-http-client:1.42.0' api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" @@ -82,7 +82,7 @@ dependencies { api 'io.grpc:grpc-context:1.46.0' api 'io.opencensus:opencensus-api:0.18.0' api 'io.opencensus:opencensus-contrib-http-util:0.18.0' - api 'com.google.apis:google-api-services-storage:v1-rev20200814-1.30.10' + api 'com.google.apis:google-api-services-storage:v1-rev20220608-1.32.1' testImplementation project(':test:fixtures:gcs-fixture') } diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 deleted file mode 100644 index e399aa5865413..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe3b480958961fc7144da10ce3653065d5eb5490 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 new file mode 100644 index 0000000000000..07aaadb2664b2 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 @@ -0,0 +1 @@ +74724addc6cecac408dad3a6a26423b7647b3724 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 deleted file mode 100644 index 802a6ab3a8d04..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2348dd57d5417c29388bd430f5055dca863c600 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 new file mode 100644 index 
0000000000000..9c20d9f12d4b0 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 @@ -0,0 +1 @@ +4f319ce80ba6888d04a38234916c43d5486842a5 \ No newline at end of file From 836a9c4910f5eb4e7fcaca5573bd3bf47073d416 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 14 Jun 2022 10:13:22 +0530 Subject: [PATCH 30/34] Add flat_skew setting to node overload decider (#3563) * Add flat_skew setting to node overload decider Signed-off-by: Rishab Nahata --- .../allocation/AwarenessAllocationIT.java | 139 +++++++++++ .../NodeLoadAwareAllocationDecider.java | 26 +- .../common/settings/ClusterSettings.java | 1 + .../NodeLoadAwareAllocationTests.java | 222 ++++++++++++++---- 4 files changed, 338 insertions(+), 50 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java index 224db09d99a99..2b73c5da27606 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java @@ -45,14 +45,17 @@ import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.empty; @@ -351,4 +354,140 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(B_1), equalTo(2)); assertThat(counts.get(noZoneNode), equalTo(2)); } + + public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws Exception { + int nodeCountPerAZ = 5; + int numOfShards = 30; + int numOfReplica = 1; + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.allocation.load_awareness.skew_factor", "0.0") + .put("cluster.routing.allocation.load_awareness.provisioned_capacity", Integer.toString(nodeCountPerAZ * 3)) + .build(); + + logger.info("--> starting 15 nodes on zones 'a' & 'b' & 'c'"); + List nodes_in_zone_a = internalCluster().startNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List nodes_in_zone_b = internalCluster().startNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List nodes_in_zone_c = internalCluster().startNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + // Creating index with 30 primary and 1 replica + createIndex( + "test-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplica) + .build() + ); + + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1") + 
.setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + ObjectIntHashMap counts = new ObjectIntHashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1); + } + } + } + + assertThat(counts.size(), equalTo(nodeCountPerAZ * 3)); + // All shards should be started + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(numOfShards * (numOfReplica + 1))); + + // stopping half nodes in zone a + int nodesToStop = nodeCountPerAZ / 2; + List nodeDataPathSettings = new ArrayList<>(); + for (int i = 0; i < nodesToStop; i++) { + nodeDataPathSettings.add(internalCluster().dataPathSettings(nodes_in_zone_a.get(i))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes_in_zone_a.get(i))); + } + + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1") + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3 - nodesToStop)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + // Creating another index with 30 primary and 1 replica + createIndex( + "test-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplica) + .build() + ); + + health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1", "test-2") + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3 - nodesToStop)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + // Restarting the nodes back + for (int i = 0; i < nodesToStop; i++) { + internalCluster().startNode( + Settings.builder() + .put("node.name", nodes_in_zone_a.get(i)) + .put(nodeDataPathSettings.get(i)) + .put(commonSettings) + .put("node.attr.zone", "a") + .build() + ); + } + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + + health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1", "test-2") + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3)) + .setWaitForGreenStatus() + .setWaitForActiveShards(2 * numOfShards * (numOfReplica + 1)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // All shards should be started now and cluster health should be green + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2 * numOfShards * (numOfReplica + 1))); + assertThat(health.isTimedOut(), equalTo(false)); + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java 
b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java index 8e2824163709d..c43fb3be214a9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java @@ -33,11 +33,13 @@ * *

* and prevent allocation on the surviving nodes of the under capacity cluster - * based on overload factor defined as a percentage by + * based on overload factor defined as a percentage and flat skew as absolute allowed skewness by + *

 * <pre>
  * cluster.routing.allocation.load_awareness.skew_factor: X
+ * cluster.routing.allocation.load_awareness.flat_skew: N
 * </pre>
- * The total limit per node based on skew_factor doesn't limit primaries that previously + * The total limit per node based on skew_factor and flat_skew doesn't limit primaries that previously * existed on the disk as those shards are force allocated by * {@link AllocationDeciders#canForceAllocatePrimary(ShardRouting, RoutingNode, RoutingAllocation)} * however new primaries due to index creation, snapshot restore etc can be controlled via the below settings. @@ -74,6 +76,13 @@ public class NodeLoadAwareAllocationDecider extends AllocationDecider { Setting.Property.Dynamic, Property.NodeScope ); + public static final Setting CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING = Setting.intSetting( + "cluster.routing.allocation.load_awareness.flat_skew", + 2, + 2, + Property.Dynamic, + Property.NodeScope + ); private volatile int provisionedCapacity; @@ -81,12 +90,15 @@ public class NodeLoadAwareAllocationDecider extends AllocationDecider { private volatile boolean allowUnassignedPrimaries; + private volatile int flatSkew; + private static final Logger logger = LogManager.getLogger(NodeLoadAwareAllocationDecider.class); public NodeLoadAwareAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.skewFactor = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.get(settings); this.provisionedCapacity = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.get(settings); this.allowUnassignedPrimaries = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING.get(settings); + this.flatSkew = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING, this::setSkewFactor); clusterSettings.addSettingsUpdateConsumer( CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING, @@ -96,6 +108,7 @@ public NodeLoadAwareAllocationDecider(Settings settings, ClusterSettings cluster CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING, this::setAllowUnassignedPrimaries ); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING, this::setFlatSkew); } private void setAllowUnassignedPrimaries(boolean allowUnassignedPrimaries) { @@ -110,6 +123,10 @@ private void setProvisionedCapacity(int provisionedCapacity) { this.provisionedCapacity = provisionedCapacity; } + private void setFlatSkew(int flatSkew) { + this.flatSkew = flatSkew; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return underCapacity(shardRouting, node, allocation, (count, limit) -> count >= limit); @@ -146,7 +163,7 @@ private Decision underCapacity( Metadata metadata = allocation.metadata(); float expectedAvgShardsPerNode = (float) metadata.getTotalNumberOfShards() / provisionedCapacity; int nodeShardCount = node.numberOfOwningShards(); - int limit = (int) Math.ceil(expectedAvgShardsPerNode * (1 + skewFactor / 100.0)); + int limit = flatSkew + (int) Math.ceil(expectedAvgShardsPerNode * (1 + skewFactor / 100.0)); if (decider.test(nodeShardCount, limit)) { logger.debug( () -> new ParameterizedMessage( @@ -163,10 +180,11 @@ private Decision underCapacity( Decision.NO, NAME, "too many shards [%d] allocated to this node, limit per node [%d] considering" - + " overload factor [%.2f] based on capacity [%d]", + + " overload factor [%.2f] and flat skew [%d] based on capacity [%d]", nodeShardCount, limit, 
skewFactor, + flatSkew, provisionedCapacity ); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index be92bf1643aee..9ba56dfa6456f 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -556,6 +556,7 @@ public void apply(Settings value, Settings current, Settings previous) { NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING, + NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING, ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED, ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENFORCED, ShardIndexingPressureSettings.REQUEST_SIZE_WINDOW, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java index d2e7e0e7e636a..c4dcae84581cb 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java @@ -22,7 +22,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; -import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.settings.Settings; @@ -106,9 +105,11 @@ public void testNewUnassignedPrimaryAllocationOnOverload() { .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node1", singletonMap("zone", "zone_1")))) .build(); - // 4 existing shards from this node's local store get started + // 4 existing shards from this node's local store get started and cluster rebalances newState = strategy.reroute(newState, "reroute"); - newState = startInitializingShardsAndReroute(strategy, newState); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(32)); // add back node2 when skewness is still breached @@ -282,11 +283,14 @@ public void testExistingPrimariesAllocationOnOverload() { newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); newState = strategy.reroute(newState, "reroute"); - newState = startInitializingShardsAndReroute(strategy, newState); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + // 28 shards should be assigned (14 on each node -> 8 * 1.5 + 2) logger.info("limits should be applied on newly create primaries"); - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(24)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), 
equalTo(16)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(28)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(12)); assertEquals( 12L, @@ -298,7 +302,7 @@ public void testExistingPrimariesAllocationOnOverload() { ); assertEquals( - 4L, + 0L, newState.getRoutingNodes() .shardsWithState(UNASSIGNED) .stream() @@ -306,7 +310,7 @@ public void testExistingPrimariesAllocationOnOverload() { .count() ); - assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(12)); + assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(14)); logger.info("--> Remove node4 from zone holding primaries"); newState = removeNodes(newState, strategy, "node4"); @@ -339,10 +343,10 @@ public void testExistingPrimariesAllocationOnOverload() { logger.info("--> do another reroute, make sure nothing moves"); assertThat(strategy.reroute(newState, "reroute").routingTable(), sameInstance(newState.routingTable())); - assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(12)); - assertThat(newState.getRoutingNodes().node("node5").size(), equalTo(12)); + assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(14)); + assertThat(newState.getRoutingNodes().node("node5").size(), equalTo(14)); - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(24)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(28)); newState = ClusterState.builder(newState) .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node1", singletonMap("zone", "zone_1")))) @@ -436,7 +440,8 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(30)); + // Each node can take 12 shards each (2 + ceil(8*1.2)) + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(36)); for (ShardRouting shard : newState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.NODE_LEFT); @@ -458,10 +463,12 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); newState = strategy.reroute(newState, "reroute"); - newState = startInitializingShardsAndReroute(strategy, newState); + while (!newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) { + newState = startInitializingShardsAndReroute(strategy, newState); + } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(20)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(66)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(14)); logger.info("add another index with 60 shards"); metadata = Metadata.builder(newState.metadata()) @@ -482,8 +489,8 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(120)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(20)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(126)); + 
assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(14)); logger.info("change settings to allow unassigned primaries"); strategy = createAllocationServiceWithAdditionalSettings( @@ -499,7 +506,7 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { ); for (RoutingNode node : newState.getRoutingNodes()) { - assertThat(node.size(), equalTo(40)); + assertThat(node.size(), equalTo(42)); } logger.info("add another index with 5 shards"); @@ -513,15 +520,15 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { ) .build(); updatedRoutingTable = RoutingTable.builder(newState.routingTable()).addAsNew(metadata.index("test3")).build(); - // increases avg shard per node to 145/5 = 29, overload factor 1.2, total allowed 35 per node and NO primaries get assigned - // since total owning shards are 40 per node already + // increases avg shard per node to 145/5 = 29, overload factor 1.2, total allowed 35+2=37 per node and NO primaries get assigned + // since total owning shards are 42 per node already newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); newState = strategy.reroute(newState, "reroute"); newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(120)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(25)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(126)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(19)); assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).stream().filter(ShardRouting::primary).count(), equalTo(5L)); } @@ -600,21 +607,24 @@ public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(5)); - assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(5)); + assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(7)); + assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(7)); // add the removed node newState = addNodes(newState, strategy, "zone3", "node11"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); - newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(5)); + assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(6)); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); // add the removed node newState = addNodes(newState, strategy, "zone3", "node12"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); - newState = startInitializingShardsAndReroute(strategy, newState); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(5)); // add the removed node @@ -674,13 +684,14 @@ public void testThreeZoneOneReplicaLimitsShardAllocationOnOverload() { logger.info("--> add five new node in new zone and reroute"); 
clusterState = addNodes(clusterState, strategy, "zone2", "node6", "node7", "node8", "node9", "node10"); + // Each node can take 7 shards each now (2 + ceil(4*1.2)) assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(30)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(25)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(30)); logger.info("--> complete relocation"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(55)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(60)); logger.info("--> do another reroute, make sure nothing moves"); assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); @@ -707,6 +718,7 @@ public void testThreeZoneOneReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } + // Each node can now have 5 shards each assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(5)); assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(5)); @@ -791,8 +803,9 @@ public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverloadAcrossZones() newState = startInitializingShardsAndReroute(strategy, newState); } // ensure minority zone doesn't get overloaded - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(53)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(10)); + // each node can take 10 shards each (2 + ceil(7*1.1)) + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(61)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); for (ShardRouting shard : newState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.NODE_LEFT); } @@ -912,15 +925,20 @@ public void testSingleZoneOneReplicaLimitsReplicaAllocationOnOverload() { clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(20)); - // assert replicas are not assigned but primaries are - logger.info("--> replicas are not initializing"); - assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); + // Each node can take 11 shards each (2 + ceil(8*1.1)), hence 2 replicas will also start + logger.info("--> 2 replicas are initializing"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.INDEX_CREATED); assertFalse(shard.primary()); } + logger.info("--> start the shards (replicas)"); + while (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + } + logger.info("--> do another reroute, make sure nothing moves"); assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); @@ -929,10 +947,12 @@ public void testSingleZoneOneReplicaLimitsReplicaAllocationOnOverload() { 
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(18)); - clusterState = startInitializingShardsAndReroute(strategy, clusterState); + while (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + } logger.info("--> replicas are started"); - assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(38)); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(40)); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.INDEX_CREATED); @@ -1012,11 +1032,12 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(50)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(10)); + // Each node can take 7 shards max ( 2 + ceil(4*1.2)) + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); for (RoutingNode node : newState.getRoutingNodes()) { - assertThat(node.size(), equalTo(5)); + assertThat(node.size(), equalTo(6)); } // add the removed node @@ -1025,9 +1046,7 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() .build(); newState = strategy.reroute(newState, "reroute"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(5)); // add the removed node newState = ClusterState.builder(newState) @@ -1035,9 +1054,7 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() .build(); newState = strategy.reroute(newState, "reroute"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(5)); // add the removed node newState = ClusterState.builder(newState) @@ -1068,6 +1085,120 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); } + public void testThreeZoneOneReplicaWithSkewFactorZeroAllShardsAssignedAfterRecovery() { + AllocationService strategy = createAllocationServiceWithAdditionalSettings( + org.opensearch.common.collect.Map.of( + NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), + 15, + NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), + 0, + "cluster.routing.allocation.awareness.force.zone.values", + "zone1,zone2,zone3" + ) + ); + + logger.info("Building initial routing table for 'testThreeZoneOneReplicaWithSkewFactorZeroAllShardsAssignedAfterRecovery'"); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(30).numberOfReplicas(1)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState 
clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding five nodes on same zone and do rerouting"); + clusterState = addNodes(clusterState, strategy, "zone1", "node1", "node2", "node3", "node4", "node5"); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(30)); + + logger.info("--> start the shards (primaries)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> add five new node in new zone and reroute"); + clusterState = addNodes(clusterState, strategy, "zone2", "node6", "node7", "node8", "node9", "node10"); + + logger.info("--> complete relocation"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + ClusterState newState = addNodes(clusterState, strategy, "zone3", "node11", "node12", "node13", "node14", "node15"); + + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); + + assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node13").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(4)); + + logger.info("--> Removing three nodes from zone3"); + newState = removeNodes(newState, strategy, "node11", "node12", "node13"); + + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + + // Each node can take 6 shards max (2 + ceil(4*1.0)), so all shards should be assigned + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); + + logger.info("add another index with 30 primary 1 replica"); + metadata = Metadata.builder(newState.metadata()) + .put( + IndexMetadata.builder("test1") + .settings( + settings(Version.CURRENT).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 30) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ) + .build(); + RoutingTable updatedRoutingTable = RoutingTable.builder(newState.routingTable()).addAsNew(metadata.index("test1")).build(); + + newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); + newState = strategy.reroute(newState, "reroute"); + + newState = startInitializingShardsAndReroute(strategy, newState); + + // add the removed node + newState = ClusterState.builder(newState) + .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node11", singletonMap("zone", "zone3")))) + .build(); + newState = strategy.reroute(newState, "reroute"); + + newState = startInitializingShardsAndReroute(strategy, newState); + + // add the removed node + newState = ClusterState.builder(newState) + .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node12", singletonMap("zone", "zone3")))) + .build(); + newState = strategy.reroute(newState, "reroute"); + + 
newState = startInitializingShardsAndReroute(strategy, newState); + + // add the removed node + newState = ClusterState.builder(newState) + .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node13", singletonMap("zone", "zone3")))) + .build(); + newState = strategy.reroute(newState, "reroute"); + + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + assertThat(newState.getRoutingNodes().node("node13").size(), equalTo(8)); + assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(8)); + assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(8)); + // ensure all shards are assigned + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(120)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); + } + private ClusterState removeNodes(ClusterState clusterState, AllocationService allocationService, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.remove(nodeId)); @@ -1097,7 +1228,6 @@ private Settings buildSettings(Map<String, Object> settingsValue) { .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 20) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 20) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), 20) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "zone"); settingsValue.forEach((k, v) -> { if (v instanceof Integer) settingsBuilder.put(k, (Integer) (v)); From 688c348ee1acceed5f4f6598111db0231fef6bed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 22:03:30 -0700 Subject: [PATCH 31/34] Bump xmlbeans from 5.0.3 to 5.1.0 in /plugins/ingest-attachment (#3572) * Bump xmlbeans from 5.0.3 to 5.1.0 in /plugins/ingest-attachment Bumps xmlbeans from 5.0.3 to 5.1.0. --- updated-dependencies: - dependency-name: org.apache.xmlbeans:xmlbeans dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 | 1 - plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 456b652ff82a3..86694b9bc9da7 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.0.3' + api 'org.apache.xmlbeans:xmlbeans:5.1.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 deleted file mode 100644 index 7451ee17640d6..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1ef1382ae9dfb2438b82b6dd575566355c2f30f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 new file mode 100644 index 0000000000000..85f757b61048c --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 @@ -0,0 +1 @@ +3534ab896663e6f6d8a2cf46882d7407641d7a31 \ No newline at end of file From cce0781b77928c3c9dd0724105a5ab266fe145c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 22:03:59 -0700 Subject: [PATCH 32/34] Bump google-oauth-client from 1.34.0 to 1.34.1 in /plugins/discovery-gce (#3570) * Bump google-oauth-client from 1.34.0 to 1.34.1 in /plugins/discovery-gce Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.34.0 to 1.34.1. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.34.0...v1.34.1) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-gce/build.gradle | 2 +- .../discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 | 1 - .../discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 983a2907e4e67..c8b52d3afcd45 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.34.0" + api "com.google.oauth-client:google-oauth-client:1.34.1" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 deleted file mode 100644 index 57c5c16b34deb..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a0dc471bd498c62280120037a42d410c0e36f5d6 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..a8434bd380761 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 @@ -0,0 +1 @@ +4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file From 2f9f8e1e3cde803d96e6ca1229c4a6879d752099 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Tue, 14 Jun 2022 11:29:52 +0530 Subject: [PATCH 33/34] Fix for bug showing incorrect awareness attributes count in AwarenessAllocationDecider (#3428) * Fix for bug showing incorrect awareness attributes count in AwarenessAllocationDecider Signed-off-by: Anshu Agarwal --- .../decider/AwarenessAllocationDecider.java | 13 ++- .../allocation/AwarenessAllocationTests.java | 92 +++++++++++++++++++ 2 files changed, 101 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index a873129723577..3d7ba09c839fc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -33,11 +33,14 @@ package org.opensearch.cluster.routing.allocation.decider; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Function; import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import 
org.opensearch.cluster.routing.ShardRouting; @@ -207,12 +210,14 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout int numberOfAttributes = nodesPerAttribute.size(); List<String> fullValues = forcedAwarenessAttributes.get(awarenessAttribute); + if (fullValues != null) { - for (String fullValue : fullValues) { - if (shardPerAttribute.containsKey(fullValue) == false) { - numberOfAttributes++; - } + // If forced awareness is enabled, numberOfAttributes = count(distinct(union(discovered_attributes, forced_attributes))) + Set<String> attributesSet = new HashSet<>(fullValues); + for (ObjectCursor<String> stringObjectCursor : nodesPerAttribute.keys()) { + attributesSet.add(stringObjectCursor.value); } + numberOfAttributes = attributesSet.size(); } // TODO should we remove ones that are not part of full list? diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java index c9e427a178515..b2adcd21cd8c9 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -35,23 +35,32 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; + +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.singletonMap; @@ -971,4 +980,87 @@ public void testMultipleAwarenessAttributes() { assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); } + + public void testAllocationExplainForUnassignedShardsWithUnbalancedZones() { + Settings settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), 10) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") +
.put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + AllocationService strategy = createAllocationService(settings); + + logger.info("Building initial routing table for 'testAllocationExplainForUnassignedShardsWithUnbalancedZones'"); + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding 3 nodes in different zones and do rerouting"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("A-0", singletonMap("zone", "a"))) + .add(newNode("A-1", singletonMap("zone", "a"))) + .add(newNode("B-0", singletonMap("zone", "b"))) + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0)); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + + logger.info("--> start the shard (primary)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + // One Shard is unassigned due to forced zone awareness + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); + + List unassignedShards = clusterState.getRoutingTable().shardsWithState(UNASSIGNED); + + ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + // Add a new node in zone c + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("C-0", singletonMap("zone", "c")))) + .build(); + + final AwarenessAllocationDecider decider = new AwarenessAllocationDecider(settings, EMPTY_CLUSTER_SETTINGS); + + final RoutingAllocation allocation = new RoutingAllocation( + new AllocationDeciders(Collections.singleton(decider)), + clusterState.getRoutingNodes(), + clusterState, + null, + null, + 0L + ); + allocation.debugDecision(true); + + Decision decision = null; + RoutingNodes nodes = clusterState.getRoutingNodes(); + + for (RoutingNode node : nodes) { + // Try to allocate unassigned shard to A-0, fails because of forced zone awareness + if (node.nodeId().equals("A-0")) { + decision = decider.canAllocate(unassignedShards.get(0), node, allocation); + assertEquals(Decision.Type.NO, decision.type()); + assertEquals( + decision.getExplanation(), + "there are too many copies of the shard allocated to nodes with attribute" + + " [zone], there are [3] total configured shard copies for this shard id and [3]" + + " total attribute values, expected the allocated shard count per attribute [2] to" + + " be less than or equal to the upper bound of the required number of shards per attribute [1]" + ); + } + + } + } } From c764d6343a66a4696b9031850707b2c33a0224a5 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 14 Jun 
2022 09:57:45 -0400 Subject: [PATCH 34/34] Added bwc version 1.3.4 (#3552) Signed-off-by: GitHub Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 0461af4966e92..378c0f52da3ad 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -40,6 +40,7 @@ BWC_VERSION: - "1.3.1" - "1.3.2" - "1.3.3" + - "1.3.4" - "2.0.0" - "2.0.1" - "2.1.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index ec33e674c4d5f..2cc8cde2cf0f3 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -87,6 +87,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_1_3_1 = new Version(1030199, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_4 = new Version(1030499, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_3_0);
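
Two short illustrative sketches follow. Neither block is part of the patches above, and the helper names (perNodeShardLimit, versionId) are hypothetical, introduced only for illustration.

The NodeLoadAwareAllocationDecider change earlier in this series computes the per-node shard limit as flatSkew + ceil(expectedAvgShardsPerNode * (1 + skewFactor / 100.0)). A minimal standalone sketch of that arithmetic, assuming only the formula visible in the underCapacity hunk:

    // Hypothetical helper mirroring the limit computed in underCapacity() above.
    static int perNodeShardLimit(int totalShards, int provisionedCapacity, double skewFactor, int flatSkew) {
        float expectedAvgShardsPerNode = (float) totalShards / provisionedCapacity;
        // flat_skew adds a fixed headroom of shards on top of the percentage-based skew_factor.
        return flatSkew + (int) Math.ceil(expectedAvgShardsPerNode * (1 + skewFactor / 100.0));
    }

With an average of 8 shards per node, a skew factor of 20 and the default flat skew of 2, this yields 2 + ceil(8 * 1.2) = 12, matching the "(2 + ceil(8*1.2))" comments in the updated NodeLoadAwareAllocationTests.

The numeric ids in the Version.java hunk directly above follow a positional layout that can be read off the neighbouring constants (1030399 for 1.3.3, 2010099 for 2.1.0). A sketch of that layout, assuming the trailing 99 is a fixed release suffix as it is in every constant shown:

    // Id layout implied by the constants above: major * 1_000_000 + minor * 10_000 + revision * 100 + 99.
    static int versionId(int major, int minor, int revision) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + 99;
    }
    // versionId(1, 3, 4) == 1030499, the id given to the new V_1_3_4 constant.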