use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
the class SeedHostsResolver method resolveHostsLists.
/**
 * Resolves a list of hosts to a list of transport addresses. Each host is resolved into a transport address (or a collection of
 * addresses if the number of ports is greater than one). Host lookups are done in parallel using the specified executor service,
 * up to the specified resolve timeout.
 *
 * @param cancellableThreads cancellable threads wrapping the blocking lookups; if execution is cancelled, an empty list is returned
 * @param executorService the executor service used to parallelize hostname lookups
 * @param logger logger used for logging messages regarding hostname lookups
 * @param hosts the hosts to resolve
 * @param transportService the transport service
 * @param resolveTimeout the timeout before returning from hostname lookups
 * @return a list of resolved transport addresses
 */
public static List<TransportAddress> resolveHostsLists(
    final CancellableThreads cancellableThreads,
    final ExecutorService executorService,
    final Logger logger,
    final List<String> hosts,
    final TransportService transportService,
    final TimeValue resolveTimeout
) {
    Objects.requireNonNull(executorService);
    Objects.requireNonNull(logger);
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(transportService);
    Objects.requireNonNull(resolveTimeout);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts.stream()
        .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn))
        .collect(Collectors.toList());
    final SetOnce<List<Future<TransportAddress[]>>> futures = new SetOnce<>();
    try {
        cancellableThreads.execute(
            () -> futures.set(executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS))
        );
    } catch (CancellableThreads.ExecutionCancelledException e) {
        return Collections.emptyList();
    }
    final List<TransportAddress> transportAddresses = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the
    // hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures.get()) {
        assert future.isDone();
        final String hostname = it.next();
        if (!future.isCancelled()) {
            try {
                final TransportAddress[] addresses = future.get();
                logger.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        transportAddresses.add(address);
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                logger.warn(message, e.getCause());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                // ignore
            }
        } else {
            logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return Collections.unmodifiableList(transportAddresses);
}
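For reference, this is how a caller might drive the resolver. A minimal sketch only: the executor size, seed host strings, and timeout are illustrative assumptions, and it presumes a started TransportService and a Logger are already in scope.

final ExecutorService executor = Executors.newFixedThreadPool(4);
try {
    final List<TransportAddress> seedAddresses = SeedHostsResolver.resolveHostsLists(
        new CancellableThreads(),
        executor,
        logger,
        Arrays.asList("10.0.0.1:9300", "seeds.example.com:9300"),   // hypothetical seed hosts
        transportService,
        TimeValue.timeValueSeconds(5)                               // give up on slow lookups after 5s
    );
    seedAddresses.forEach(address -> logger.trace("resolved seed address {}", address));
} finally {
    executor.shutdown();
}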
use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
the class UpdateByQueryWithScriptTests method action.
@Override
protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action(ScriptService scriptService, UpdateByQueryRequest request) {
    TransportService transportService = mock(TransportService.class);
    // the mock keeps the constructor from touching real networking; the constructed action is not referenced again in this helper
    TransportUpdateByQueryAction transportAction = new TransportUpdateByQueryAction(
        threadPool,
        new ActionFilters(Collections.emptySet()),
        null,
        transportService,
        scriptService,
        null
    );
    return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(
        task,
        logger,
        null,
        threadPool,
        scriptService,
        request,
        ClusterState.EMPTY_STATE,
        listener()
    );
}
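Because the TransportService is a Mockito mock, any method the code under test happens to call returns Mockito's defaults (null, 0, false). If a test did need real answers from the mock, standard stubbing applies; a sketch, where the stubbed return value is an assumption rather than something this particular test requires:

TransportService transportService = mock(TransportService.class);
when(transportService.getThreadPool()).thenReturn(threadPool);   // hand back the test's own ThreadPool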
use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
the class IndexingPressureIT method testWriteBytesAreIncremented.
public void testWriteBytesAreIncremented() throws Exception {
    assertAcked(
        prepareCreate(
            INDEX_NAME,
            Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
        )
    );
    ensureGreen(INDEX_NAME);
    Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames();
    String primaryName = primaryReplicaNodeNames.v1();
    String replicaName = primaryReplicaNodeNames.v2();
    String coordinatingOnlyNode = getCoordinatingOnlyNode();
    final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
    final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
    TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
    final MockTransportService primaryTransportService = (MockTransportService) primaryService;
    TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
    final MockTransportService replicaTransportService = (MockTransportService) replicaService;
    primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
        if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
            try {
                replicationSendPointReached.countDown();
                latchBlockingReplicationSend.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
    final Releasable replicaRelease = blockReplicas(replicaThreadPool);
    final BulkRequest bulkRequest = new BulkRequest();
    int totalRequestSize = 0;
    for (int i = 0; i < 80; ++i) {
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID())
            .source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        totalRequestSize += request.ramBytesUsed();
        assertTrue(request.ramBytesUsed() > request.source().length());
        bulkRequest.add(request);
    }
    final long bulkRequestSize = bulkRequest.ramBytesUsed();
    final long bulkShardRequestSize = totalRequestSize;
    try {
        final ActionFuture<BulkResponse> successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
        replicationSendPointReached.await();
        IndexingPressure primaryWriteLimits = internalCluster().getInstance(IndexingPressureService.class, primaryName)
            .getShardIndexingPressure();
        IndexingPressure replicaWriteLimits = internalCluster().getInstance(IndexingPressureService.class, replicaName)
            .getShardIndexingPressure();
        IndexingPressure coordinatingWriteLimits = internalCluster().getInstance(IndexingPressureService.class, coordinatingOnlyNode)
            .getShardIndexingPressure();
        assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize));
        assertThat(primaryWriteLimits.getCurrentPrimaryBytes(), greaterThan(bulkShardRequestSize));
        assertEquals(0, primaryWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, primaryWriteLimits.getCurrentReplicaBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentReplicaBytes());
        assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentReplicaBytes());
        latchBlockingReplicationSend.countDown();
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID())
            .source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        final BulkRequest secondBulkRequest = new BulkRequest();
        secondBulkRequest.add(request);
        // Use the primary or the replica data node as the coordinating node this time
        boolean usePrimaryAsCoordinatingNode = randomBoolean();
        final ActionFuture<BulkResponse> secondFuture;
        if (usePrimaryAsCoordinatingNode) {
            secondFuture = client(primaryName).bulk(secondBulkRequest);
        } else {
            secondFuture = client(replicaName).bulk(secondBulkRequest);
        }
        final long secondBulkRequestSize = secondBulkRequest.ramBytesUsed();
        final long secondBulkShardRequestSize = request.ramBytesUsed();
        if (usePrimaryAsCoordinatingNode) {
            assertBusy(() -> {
                assertThat(
                    primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(),
                    greaterThan(bulkShardRequestSize + secondBulkRequestSize)
                );
                assertEquals(secondBulkRequestSize, primaryWriteLimits.getCurrentCoordinatingBytes());
                assertThat(primaryWriteLimits.getCurrentPrimaryBytes(), greaterThan(bulkShardRequestSize + secondBulkRequestSize));
                assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
                assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
                assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
            });
        } else {
            assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize));
            assertEquals(secondBulkRequestSize, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
            assertEquals(secondBulkRequestSize, replicaWriteLimits.getCurrentCoordinatingBytes());
            assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
        }
        assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertBusy(
            () -> assertThat(replicaWriteLimits.getCurrentReplicaBytes(), greaterThan(bulkShardRequestSize + secondBulkShardRequestSize))
        );
        replicaRelease.close();
        successFuture.actionGet();
        secondFuture.actionGet();
        assertEquals(0, primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, primaryWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, primaryWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, primaryWriteLimits.getCurrentReplicaBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentReplicaBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentReplicaBytes());
    } finally {
        if (replicationSendPointReached.getCount() > 0) {
            replicationSendPointReached.countDown();
        }
        replicaRelease.close();
        if (latchBlockingReplicationSend.getCount() > 0) {
            latchBlockingReplicationSend.countDown();
        }
        replicaRelease.close();
        primaryTransportService.clearAllRules();
    }
}
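The blockReplicas helper used above is defined elsewhere in the test class and is not part of this listing. A plausible sketch, under the assumption that it works by parking every WRITE thread on the replica so replica-side bulk operations queue up until the returned Releasable is closed:

public static Releasable blockReplicas(ThreadPool threadPool) {
    final CountDownLatch blockReplication = new CountDownLatch(1);
    final int threads = threadPool.info(ThreadPool.Names.WRITE).getMax();
    for (int i = 0; i < threads; ++i) {
        threadPool.executor(ThreadPool.Names.WRITE).execute(() -> {
            try {
                blockReplication.await();   // park this write thread until the Releasable is closed
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        });
    }
    return blockReplication::countDown;     // closing unparks all write threads at once
}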
use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
the class ShardIndexingPressureIT method testShardIndexingPressureTrackingDuringBulkWrites.
public void testShardIndexingPressureTrackingDuringBulkWrites() throws Exception {
    assertAcked(
        prepareCreate(
            INDEX_NAME,
            Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
        )
    );
    ensureGreen(INDEX_NAME);
    Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME);
    String primaryName = primaryReplicaNodeNames.v1();
    String replicaName = primaryReplicaNodeNames.v2();
    String coordinatingOnlyNode = getCoordinatingOnlyNode();
    final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
    final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
    TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
    final MockTransportService primaryTransportService = (MockTransportService) primaryService;
    TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
    final MockTransportService replicaTransportService = (MockTransportService) replicaService;
    primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
        if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
            try {
                replicationSendPointReached.countDown();
                latchBlockingReplicationSend.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
    final Releasable replicaRelease = blockReplicas(replicaThreadPool);
    final BulkRequest bulkRequest = new BulkRequest();
    int totalRequestSize = 0;
    for (int i = 0; i < 80; ++i) {
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID())
            .source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        totalRequestSize += request.ramBytesUsed();
        assertTrue(request.ramBytesUsed() > request.source().length());
        bulkRequest.add(request);
    }
    final long bulkShardRequestSize = totalRequestSize + (RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class) * 80)
        + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
    try {
        final ActionFuture<BulkResponse> successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
        replicationSendPointReached.await();
        IndexService indexService = internalCluster().getInstance(IndicesService.class, primaryName).iterator().next();
        Index index = indexService.getIndexSettings().getIndex();
        ShardId shardId = new ShardId(index, 0);
        ShardIndexingPressureTracker primaryShardTracker = internalCluster().getInstance(IndexingPressureService.class, primaryName)
            .getShardIndexingPressure()
            .getShardIndexingPressureTracker(shardId);
        ShardIndexingPressureTracker replicaShardTracker = internalCluster().getInstance(IndexingPressureService.class, replicaName)
            .getShardIndexingPressure()
            .getShardIndexingPressureTracker(shardId);
        ShardIndexingPressureTracker coordinatingShardTracker = internalCluster().getInstance(
            IndexingPressureService.class,
            coordinatingOnlyNode
        ).getShardIndexingPressure().getShardIndexingPressureTracker(shardId);
        assertThat(
            primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(),
            equalTo(bulkShardRequestSize)
        );
        assertThat(primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize));
        assertEquals(0, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, primaryShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(
            bulkShardRequestSize,
            coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes()
        );
        assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        latchBlockingReplicationSend.countDown();
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID())
            .source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        final BulkRequest secondBulkRequest = new BulkRequest();
        secondBulkRequest.add(request);
        // Use the primary or the replica data node as the coordinating node this time
        boolean usePrimaryAsCoordinatingNode = randomBoolean();
        final ActionFuture<BulkResponse> secondFuture;
        if (usePrimaryAsCoordinatingNode) {
            secondFuture = client(primaryName).bulk(secondBulkRequest);
        } else {
            secondFuture = client(replicaName).bulk(secondBulkRequest);
        }
        final long secondBulkShardRequestSize = request.ramBytesUsed() + RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class)
            + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
        if (usePrimaryAsCoordinatingNode) {
            assertBusy(() -> {
                assertThat(
                    primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(),
                    equalTo(bulkShardRequestSize + secondBulkShardRequestSize)
                );
                assertEquals(
                    secondBulkShardRequestSize,
                    primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes()
                );
                assertThat(
                    primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes(),
                    equalTo(bulkShardRequestSize + secondBulkShardRequestSize)
                );
                assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
                assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
                assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
            });
        } else {
            assertThat(
                primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(),
                equalTo(bulkShardRequestSize)
            );
            assertEquals(
                secondBulkShardRequestSize,
                replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes()
            );
            assertEquals(secondBulkShardRequestSize, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
            assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        }
        assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertBusy(
            () -> assertThat(
                replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes(),
                equalTo(bulkShardRequestSize + secondBulkShardRequestSize)
            )
        );
        replicaRelease.close();
        successFuture.actionGet();
        secondFuture.actionGet();
        assertEquals(0, primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, primaryShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, coordinatingShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
    } finally {
        if (replicationSendPointReached.getCount() > 0) {
            replicationSendPointReached.countDown();
        }
        replicaRelease.close();
        if (latchBlockingReplicationSend.getCount() > 0) {
            latchBlockingReplicationSend.countDown();
        }
        replicaRelease.close();
        primaryTransportService.clearAllRules();
    }
}
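Note the difference from the node-level test above: the shard-level trackers are asserted with equalTo rather than greaterThan, so the expected size must be reconstructed as the exact accounted bytes of one BulkShardRequest. The arithmetic, spelled out (the concrete values are JVM-dependent, not fixed constants):

long perItemOverhead = RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class);   // wrapper per bulk item
long shardRequestOverhead = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
long expectedShardBytes = totalRequestSize + 80 * perItemOverhead + shardRequestOverhead;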
use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
the class RetentionLeaseIT method testRetentionLeasesSyncOnRecovery.
public void testRetentionLeasesSyncOnRecovery() throws Exception {
    final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    /*
     * We effectively disable the background sync to ensure that the retention leases are not synced in the background so that the only
     * source of retention leases on the replicas would be from recovery.
     */
    final Settings.Builder settings = Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", 0)
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24));
    // when we increase the number of replicas below we want to exclude the replicas from being allocated so that they do not recover
    assertAcked(prepareCreate("index", 1, settings));
    ensureYellow("index");
    final AcknowledgedResponse response = client().admin()
        .indices()
        .prepareUpdateSettings("index")
        .setSettings(Settings.builder().put("index.number_of_replicas", numberOfReplicas).build())
        .get();
    assertTrue(response.isAcknowledged());
    final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster().getInstance(IndicesService.class, primaryShardNodeName)
        .getShardOrNull(new ShardId(resolveIndex("index"), 0));
    final int length = randomIntBetween(1, 8);
    final Map<String, RetentionLease> currentRetentionLeases = new LinkedHashMap<>();
    logger.info("adding [{}] retention leases", length);
    for (int i = 0; i < length; i++) {
        final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8));
        final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
        final String source = randomAlphaOfLength(8);
        final CountDownLatch latch = new CountDownLatch(1);
        final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch);
        currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener));
        latch.await();
    }
    logger.info("finished adding [{}] retention leases", length);
    // cause some recoveries to fail to ensure that retention leases are handled properly when retrying a recovery
    assertAcked(
        client().admin()
            .cluster()
            .prepareUpdateSettings()
            .setPersistentSettings(Settings.builder().put(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), TimeValue.timeValueMillis(100)))
    );
    final Semaphore recoveriesToDisrupt = new Semaphore(scaledRandomIntBetween(0, 4));
    final MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance(
        TransportService.class,
        primaryShardNodeName
    );
    primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
        if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE) && recoveriesToDisrupt.tryAcquire()) {
            if (randomBoolean()) {
                // return a ConnectTransportException to the START_RECOVERY action
                final TransportService replicaTransportService = internalCluster().getInstance(
                    TransportService.class,
                    connection.getNode().getName()
                );
                final DiscoveryNode primaryNode = primaryTransportService.getLocalNode();
                replicaTransportService.disconnectFromNode(primaryNode);
                replicaTransportService.connectToNode(primaryNode);
            } else {
                // return an exception to the FINALIZE action
                throw new OpenSearchException("failing recovery for test purposes");
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    logger.info("allow [{}] replicas to allocate", numberOfReplicas);
    // now allow the replicas to be allocated and wait for recovery to finalize
    allowNodes("index", 1 + numberOfReplicas);
    ensureGreen("index");
    // check current retention leases have been synced to all replicas
    for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) {
        final String replicaShardNodeId = replicaShard.currentNodeId();
        final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
        final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName)
            .getShardOrNull(new ShardId(resolveIndex("index"), 0));
        final Map<String, RetentionLease> retentionLeasesOnReplica = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(
            replica.getRetentionLeases()
        );
        assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases));
        // check retention leases have been written on the replica; see RecoveryTarget#finalizeRecovery
        assertThat(
            currentRetentionLeases,
            equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.loadRetentionLeases()))
        );
    }
}
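The countDownLatchListener helper is not part of this listing. A minimal sketch of what such a helper could look like, assuming it should release the latch once the lease has been synced and fail the test on error:

private static ActionListener<ReplicationResponse> countDownLatchListener(final CountDownLatch latch) {
    return ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()));
}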