Use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
The class RetentionLeaseIT, method testRetentionLeasesSyncOnRecovery:
public void testRetentionLeasesSyncOnRecovery() throws Exception {
final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
/*
* We effectively disable the background sync to ensure that the retention leases are not synced in the background so that the only
* source of retention leases on the replicas would be from recovery.
*/
final Settings.Builder settings = Settings.builder()
    .put("index.number_of_shards", 1)
    .put("index.number_of_replicas", 0)
    .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
    .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24));
// when we increase the number of replicas below we want to exclude the replicas from being allocated so that they do not recover
assertAcked(prepareCreate("index", 1, settings));
ensureYellow("index");
final AcknowledgedResponse response = client().admin()
    .indices()
    .prepareUpdateSettings("index")
    .setSettings(Settings.builder().put("index.number_of_replicas", numberOfReplicas).build())
    .get();
assertTrue(response.isAcknowledged());
final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId();
final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
final IndexShard primary = internalCluster().getInstance(IndicesService.class, primaryShardNodeName).getShardOrNull(new ShardId(resolveIndex("index"), 0));
final int length = randomIntBetween(1, 8);
final Map<String, RetentionLease> currentRetentionLeases = new LinkedHashMap<>();
logger.info("adding retention [{}}] leases", length);
for (int i = 0; i < length; i++) {
final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8));
final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
final String source = randomAlphaOfLength(8);
final CountDownLatch latch = new CountDownLatch(1);
final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch);
currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener));
latch.await();
}
logger.info("finished adding [{}] retention leases", length);
// cause some recoveries to fail to ensure that retention leases are handled properly when retrying a recovery
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), TimeValue.timeValueMillis(100))));
final Semaphore recoveriesToDisrupt = new Semaphore(scaledRandomIntBetween(0, 4));
final MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, primaryShardNodeName);
primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE) && recoveriesToDisrupt.tryAcquire()) {
if (randomBoolean()) {
// deliver a ConnectTransportException to the replica's in-flight START_RECOVERY request by bouncing its connection to the primary
final TransportService replicaTransportService = internalCluster().getInstance(TransportService.class, connection.getNode().getName());
final DiscoveryNode primaryNode = primaryTransportService.getLocalNode();
replicaTransportService.disconnectFromNode(primaryNode);
replicaTransportService.connectToNode(primaryNode);
} else {
// return an exception to the FINALIZE action
throw new OpenSearchException("failing recovery for test purposes");
}
}
connection.sendRequest(requestId, action, request, options);
});
logger.info("allow [{}] replicas to allocate", numberOfReplicas);
// now allow the replicas to be allocated and wait for recovery to finalize
allowNodes("index", 1 + numberOfReplicas);
ensureGreen("index");
// check current retention leases have been synced to all replicas
for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) {
final String replicaShardNodeId = replicaShard.currentNodeId();
final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName).getShardOrNull(new ShardId(resolveIndex("index"), 0));
final Map<String, RetentionLease> retentionLeasesOnReplica = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.getRetentionLeases());
assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases));
// check retention leases have been written on the replica; see RecoveryTarget#finalizeRecovery
assertThat(currentRetentionLeases, equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.loadRetentionLeases())));
}
}
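The countDownLatchListener helper used above is not reproduced on this page. A minimal sketch of what it plausibly looks like, assuming ActionListener.wrap from org.opensearch.action and that any failure should fail the test (the actual helper in RetentionLeaseIT may differ in details):

private static ActionListener<ReplicationResponse> countDownLatchListener(final CountDownLatch latch) {
    // release the waiting test thread once the retention lease addition has been acknowledged
    return ActionListener.wrap(response -> latch.countDown(), e -> {
        throw new AssertionError("unexpected failure while adding a retention lease", e);
    });
}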
Use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
The class IndexingPressureIT, method testWriteBytesAreIncremented:
public void testWriteBytesAreIncremented() throws Exception {
assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)));
ensureGreen(INDEX_NAME);
Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames();
String primaryName = primaryReplicaNodeNames.v1();
String replicaName = primaryReplicaNodeNames.v2();
String coordinatingOnlyNode = getCoordinatingOnlyNode();
final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
final MockTransportService primaryTransportService = (MockTransportService) primaryService;
TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
final MockTransportService replicaTransportService = (MockTransportService) replicaService;
primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
try {
replicationSendPointReached.countDown();
latchBlockingReplicationSend.await();
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
}
connection.sendRequest(requestId, action, request, options);
});
final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
final Releasable replicaRelease = blockReplicas(replicaThreadPool);
final BulkRequest bulkRequest = new BulkRequest();
int totalRequestSize = 0;
for (int i = 0; i < 80; ++i) {
IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
totalRequestSize += request.ramBytesUsed();
assertTrue(request.ramBytesUsed() > request.source().length());
bulkRequest.add(request);
}
final long bulkRequestSize = bulkRequest.ramBytesUsed();
final long bulkShardRequestSize = totalRequestSize;
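// Note the two sizes: bulkRequestSize is what the coordinating node accounts for (the whole
// BulkRequest, including its own overhead), while bulkShardRequestSize is the summed item
// payload that the primary- and replica-side assertions below are compared against.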
try {
final ActionFuture<BulkResponse> successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
replicationSendPointReached.await();
IndexingPressure primaryWriteLimits = internalCluster().getInstance(IndexingPressureService.class, primaryName)
    .getShardIndexingPressure();
IndexingPressure replicaWriteLimits = internalCluster().getInstance(IndexingPressureService.class, replicaName)
    .getShardIndexingPressure();
IndexingPressure coordinatingWriteLimits = internalCluster().getInstance(IndexingPressureService.class, coordinatingOnlyNode)
    .getShardIndexingPressure();
assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize));
assertThat(primaryWriteLimits.getCurrentPrimaryBytes(), greaterThan(bulkShardRequestSize));
assertEquals(0, primaryWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, primaryWriteLimits.getCurrentReplicaBytes());
assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
assertEquals(0, replicaWriteLimits.getCurrentReplicaBytes());
assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, coordinatingWriteLimits.getCurrentPrimaryBytes());
assertEquals(0, coordinatingWriteLimits.getCurrentReplicaBytes());
latchBlockingReplicationSend.countDown();
IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
final BulkRequest secondBulkRequest = new BulkRequest();
secondBulkRequest.add(request);
// Use the primary or the replica data node as the coordinating node this time
boolean usePrimaryAsCoordinatingNode = randomBoolean();
final ActionFuture<BulkResponse> secondFuture;
if (usePrimaryAsCoordinatingNode) {
secondFuture = client(primaryName).bulk(secondBulkRequest);
} else {
secondFuture = client(replicaName).bulk(secondBulkRequest);
}
final long secondBulkRequestSize = secondBulkRequest.ramBytesUsed();
final long secondBulkShardRequestSize = request.ramBytesUsed();
if (usePrimaryAsCoordinatingNode) {
assertBusy(() -> {
assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize + secondBulkRequestSize));
assertEquals(secondBulkRequestSize, primaryWriteLimits.getCurrentCoordinatingBytes());
assertThat(primaryWriteLimits.getCurrentPrimaryBytes(), greaterThan(bulkShardRequestSize + secondBulkRequestSize));
assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
});
} else {
assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize));
assertEquals(secondBulkRequestSize, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(secondBulkRequestSize, replicaWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
}
assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertBusy(() -> assertThat(replicaWriteLimits.getCurrentReplicaBytes(), greaterThan(bulkShardRequestSize + secondBulkShardRequestSize)));
replicaRelease.close();
successFuture.actionGet();
secondFuture.actionGet();
assertEquals(0, primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, primaryWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, primaryWriteLimits.getCurrentPrimaryBytes());
assertEquals(0, primaryWriteLimits.getCurrentReplicaBytes());
assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
assertEquals(0, replicaWriteLimits.getCurrentReplicaBytes());
assertEquals(0, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, coordinatingWriteLimits.getCurrentCoordinatingBytes());
assertEquals(0, coordinatingWriteLimits.getCurrentPrimaryBytes());
assertEquals(0, coordinatingWriteLimits.getCurrentReplicaBytes());
} finally {
if (replicationSendPointReached.getCount() > 0) {
replicationSendPointReached.countDown();
}
replicaRelease.close();
if (latchBlockingReplicationSend.getCount() > 0) {
latchBlockingReplicationSend.countDown();
}
replicaRelease.close();
primaryTransportService.clearAllRules();
}
}
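blockReplicas is another helper that this page omits. A sketch of the usual pattern, under the assumption that it saturates the replica's WRITE thread pool and returns a Releasable that unblocks it:

private static Releasable blockReplicas(final ThreadPool threadPool) {
    final CountDownLatch blockReplication = new CountDownLatch(1);
    final int threads = threadPool.info(ThreadPool.Names.WRITE).getMax();
    final CountDownLatch pointReached = new CountDownLatch(threads);
    for (int i = 0; i < threads; ++i) {
        threadPool.executor(ThreadPool.Names.WRITE).execute(() -> {
            try {
                // park one write thread until the test releases the latch
                pointReached.countDown();
                blockReplication.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        });
    }
    try {
        // do not hand control back to the test until every write thread is parked
        pointReached.await();
    } catch (InterruptedException e) {
        throw new IllegalStateException(e);
    }
    return blockReplication::countDown;
}

Because every write thread on the replica is occupied, replica-side bytes stay visible to the assertions until the Releasable is closed.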
Use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
The class ShardIndexingPressureIT, method testShardIndexingPressureTrackingDuringBulkWrites:
public void testShardIndexingPressureTrackingDuringBulkWrites() throws Exception {
assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)));
ensureGreen(INDEX_NAME);
Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME);
String primaryName = primaryReplicaNodeNames.v1();
String replicaName = primaryReplicaNodeNames.v2();
String coordinatingOnlyNode = getCoordinatingOnlyNode();
final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
final MockTransportService primaryTransportService = (MockTransportService) primaryService;
TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
final MockTransportService replicaTransportService = (MockTransportService) replicaService;
primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
try {
replicationSendPointReached.countDown();
latchBlockingReplicationSend.await();
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
}
connection.sendRequest(requestId, action, request, options);
});
final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
final Releasable replicaRelease = blockReplicas(replicaThreadPool);
final BulkRequest bulkRequest = new BulkRequest();
int totalRequestSize = 0;
for (int i = 0; i < 80; ++i) {
IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
totalRequestSize += request.ramBytesUsed();
assertTrue(request.ramBytesUsed() > request.source().length());
bulkRequest.add(request);
}
final long bulkShardRequestSize = totalRequestSize
    + (RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class) * 80)
    + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
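// Shard-level accounting is exact (note the equalTo assertions below, versus greaterThan in the
// node-level test above), so the expected size includes the shallow overhead of the
// BulkShardRequest and of each of the 80 BulkItemRequest wrappers on top of the raw source bytes.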
try {
final ActionFuture<BulkResponse> successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
replicationSendPointReached.await();
IndexService indexService = internalCluster().getInstance(IndicesService.class, primaryName).iterator().next();
Index index = indexService.getIndexSettings().getIndex();
ShardId shardId = new ShardId(index, 0);
ShardIndexingPressureTracker primaryShardTracker = internalCluster().getInstance(IndexingPressureService.class, primaryName)
    .getShardIndexingPressure()
    .getShardIndexingPressureTracker(shardId);
ShardIndexingPressureTracker replicaShardTracker = internalCluster().getInstance(IndexingPressureService.class, replicaName)
    .getShardIndexingPressure()
    .getShardIndexingPressureTracker(shardId);
ShardIndexingPressureTracker coordinatingShardTracker = internalCluster().getInstance(IndexingPressureService.class, coordinatingOnlyNode)
    .getShardIndexingPressure()
    .getShardIndexingPressureTracker(shardId);
assertThat(primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(bulkShardRequestSize));
assertThat(primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize));
assertEquals(0, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, primaryShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, coordinatingShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, coordinatingShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
latchBlockingReplicationSend.countDown();
IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
final BulkRequest secondBulkRequest = new BulkRequest();
secondBulkRequest.add(request);
// Use the primary or the replica data node as the coordinating node this time
boolean usePrimaryAsCoordinatingNode = randomBoolean();
final ActionFuture<BulkResponse> secondFuture;
if (usePrimaryAsCoordinatingNode) {
secondFuture = client(primaryName).bulk(secondBulkRequest);
} else {
secondFuture = client(replicaName).bulk(secondBulkRequest);
}
final long secondBulkShardRequestSize = request.ramBytesUsed() + RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class) + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
if (usePrimaryAsCoordinatingNode) {
assertBusy(() -> {
assertThat(primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(bulkShardRequestSize + secondBulkShardRequestSize));
assertEquals(secondBulkShardRequestSize, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertThat(primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize + secondBulkShardRequestSize));
assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
});
} else {
assertThat(primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(bulkShardRequestSize));
assertEquals(secondBulkShardRequestSize, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(secondBulkShardRequestSize, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
}
assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertBusy(() -> assertThat(replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize + secondBulkShardRequestSize)));
replicaRelease.close();
successFuture.actionGet();
secondFuture.actionGet();
assertEquals(0, primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, primaryShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
assertEquals(0, coordinatingShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, coordinatingShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
assertEquals(0, coordinatingShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
} finally {
if (replicationSendPointReached.getCount() > 0) {
replicationSendPointReached.countDown();
}
replicaRelease.close();
if (latchBlockingReplicationSend.getCount() > 0) {
latchBlockingReplicationSend.countDown();
}
replicaRelease.close();
primaryTransportService.clearAllRules();
}
}
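getPrimaryReplicaNodeNames, shared by both indexing-pressure tests, is also not shown here. A sketch under the assumption that it resolves the node names from the routing table of shard 0, using the same lookups the retention lease test performs inline:

private Tuple<String, String> getPrimaryReplicaNodeNames(final String index) {
    final ClusterState state = client().admin().cluster().prepareState().get().getState();
    final IndexShardRoutingTable shardTable = state.routingTable().index(index).shard(0);
    final String primaryName = state.nodes().get(shardTable.primaryShard().currentNodeId()).getName();
    final String replicaName = state.nodes().get(shardTable.replicaShards().get(0).currentNodeId()).getName();
    return Tuple.tuple(primaryName, replicaName);
}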
Use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
The class ShardIndexingPressureSettingsIT, method testShardIndexingPressureRequestSizeWindowSettingUpdate:
public void testShardIndexingPressureRequestSizeWindowSettingUpdate() throws Exception {
final BulkRequest bulkRequest = new BulkRequest();
int totalRequestSize = 0;
for (int i = 0; i < 80; ++i) {
IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
totalRequestSize += request.ramBytesUsed();
assertTrue(request.ramBytesUsed() > request.source().length());
bulkRequest.add(request);
}
final long bulkShardRequestSize = totalRequestSize
    + (RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class) * 80)
    + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
Settings settings = Settings.builder()
    .put(ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED.getKey(), true)
    .put(ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENFORCED.getKey(), true)
    .put(IndexingPressure.MAX_INDEXING_BYTES.getKey(), (long) (bulkShardRequestSize * 1.2) + "B")
    .put(ShardIndexingPressureSettings.REQUEST_SIZE_WINDOW.getKey(), 1)
    .put(ShardIndexingPressureMemoryManager.THROUGHPUT_DEGRADATION_LIMITS.getKey(), 1)
    .build();
restartCluster(settings);
assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)));
ensureGreen(INDEX_NAME);
Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME);
String primaryName = primaryReplicaNodeNames.v1();
String replicaName = primaryReplicaNodeNames.v2();
String coordinatingOnlyNode = getCoordinatingOnlyNode();
IndexService indexService = internalCluster().getInstance(IndicesService.class, primaryName).iterator().next();
Index index = indexService.getIndexSettings().getIndex();
ShardId shardId = new ShardId(index, 0);
boolean randomBoolean = randomBoolean();
// Send the first request, which succeeds
ActionFuture<BulkResponse> successFuture;
if (randomBoolean) {
successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
} else {
successFuture = client(primaryName).bulk(bulkRequest);
}
successFuture.actionGet();
final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
final MockTransportService primaryTransportService = (MockTransportService) primaryService;
TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
final MockTransportService replicaTransportService = (MockTransportService) replicaService;
primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
try {
replicationSendPointReached.countDown();
latchBlockingReplicationSend.await();
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
}
connection.sendRequest(requestId, action, request, options);
});
final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
final Releasable replicaRelease = blockReplicas(replicaThreadPool);
// Send one more request, which remains outstanding for the delay period
if (randomBoolean) {
successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
} else {
successFuture = client(primaryName).bulk(bulkRequest);
}
// Delay long enough to breach the last-successful-request timestamp threshold
Thread.sleep(3000);
latchBlockingReplicationSend.countDown();
replicaRelease.close();
successFuture.actionGet();
// This request breaches the threshold and hence will be rejected
if (randomBoolean) {
expectThrows(OpenSearchRejectedExecutionException.class, () -> client(coordinatingOnlyNode).bulk(bulkRequest).actionGet());
} else {
expectThrows(OpenSearchRejectedExecutionException.class, () -> client(primaryName).bulk(bulkRequest).actionGet());
}
// Update the request size window setting so that no further rejections occur
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.persistentSettings(Settings.builder().put(ShardIndexingPressureSettings.REQUEST_SIZE_WINDOW.getKey(), 10));
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
// Verify no rejection with similar request pattern
if (randomBoolean) {
successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
successFuture.actionGet();
successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
Thread.sleep(10);
successFuture.actionGet();
client(coordinatingOnlyNode).bulk(bulkRequest).actionGet();
ShardIndexingPressureTracker coordinatingShardTracker = internalCluster().getInstance(IndexingPressureService.class, coordinatingOnlyNode)
    .getShardIndexingPressure()
    .getShardIndexingPressureTracker(shardId);
assertEquals(1, coordinatingShardTracker.getCoordinatingOperationTracker().getRejectionTracker().getTotalRejections());
assertEquals(1, coordinatingShardTracker.getCoordinatingOperationTracker().getRejectionTracker().getThroughputDegradationLimitsBreachedRejections());
} else {
successFuture = client(primaryName).bulk(bulkRequest);
successFuture.actionGet();
successFuture = client(primaryName).bulk(bulkRequest);
Thread.sleep(10);
successFuture.actionGet();
client(primaryName).bulk(bulkRequest).actionGet();
ShardIndexingPressureTracker primaryShardTracker = internalCluster().getInstance(IndexingPressureService.class, primaryName)
    .getShardIndexingPressure()
    .getShardIndexingPressureTracker(shardId);
assertEquals(1, primaryShardTracker.getCoordinatingOperationTracker().getRejectionTracker().getTotalRejections());
assertEquals(1, primaryShardTracker.getCoordinatingOperationTracker().getRejectionTracker().getThroughputDegradationLimitsBreachedRejections());
}
}
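getCoordinatingOnlyNode appears in all three indexing-pressure tests. One way to provide it is to ask the internal test cluster for a dedicated coordinating-only node; this is an assumption, as the actual helper may instead look up an existing node from cluster state:

private String getCoordinatingOnlyNode() {
    // a node with neither the data nor the cluster-manager role does nothing but coordinate requests
    return internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
}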
Use of org.opensearch.transport.TransportService in project OpenSearch by opensearch-project.
The class SingleNodeDiscoveryIT, method testCannotJoinNodeWithSingleNodeDiscovery:
public void testCannotJoinNodeWithSingleNodeDiscovery() throws Exception {
Logger clusterLogger = LogManager.getLogger(JoinHelper.class);
try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(clusterLogger)) {
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") {
@Override
public boolean innerMatch(final LogEvent event) {
return event.getThrown() != null
    && event.getThrown().getClass() == RemoteTransportException.class
    && event.getThrown().getCause() != null
    && event.getThrown().getCause().getClass() == IllegalStateException.class
    && event.getThrown().getCause().getMessage().contains("cannot join node with [discovery.type] set to [single-node]");
}
});
final TransportService service = internalCluster().getInstance(TransportService.class);
final int port = service.boundAddress().publishAddress().getPort();
final NodeConfigurationSource configurationSource = new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
    .put("discovery.type", "zen")
    .put("transport.type", getTestTransportType())
    .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s")
    .put("transport.port", port + "-" + (port + 5 - 1))
    .build();
}
@Override
public Path nodeConfigPath(int nodeOrdinal) {
return null;
}
};
try (InternalTestCluster other = new InternalTestCluster(
    randomLong(),
    createTempDir(),
    false,
    false,
    1,
    1,
    internalCluster().getClusterName(),
    configurationSource,
    0,
    "other",
    Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
    Function.identity())) {
other.beforeTest(random());
final ClusterState first = internalCluster().getInstance(ClusterService.class).state();
assertThat(first.nodes().getSize(), equalTo(1));
assertBusy(() -> mockAppender.assertAllExpectationsMatched());
}
}
}
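All of the examples above lean on the same MockTransportService hook: fetch a node's TransportService from the internal cluster, cast it, and wrap outgoing requests. A stripped-down sketch of that shared shape, where nodeName and ACTION_TO_DISRUPT are placeholders for the node and transport action under test:

final MockTransportService mockService = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeName);
mockService.addSendBehavior((connection, requestId, action, request, options) -> {
    if (action.equals(ACTION_TO_DISRUPT)) {
        // inject the fault here: throw an exception, block on a latch, or bounce a connection
    }
    // always forward the request afterwards, otherwise recoveries and replication stall
    connection.sendRequest(requestId, action, request, options);
});
try {
    // ... run the scenario under test ...
} finally {
    mockService.clearAllRules(); // restore default transport behavior
}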