Use of org.elasticsearch.test.transport.MockTransportService in project elasticsearch by elastic.
The class DiscoveryWithServiceDisruptionsIT, method testClusterJoinDespiteOfPublishingIssues:
/**
 * Tests that a node can join the cluster despite issues in cluster state publishing.
 */
public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
    List<String> nodes = startCluster(2, 1);
    String masterNode = internalCluster().getMasterName();
    String nonMasterNode;
    if (masterNode.equals(nodes.get(0))) {
        nonMasterNode = nodes.get(1);
    } else {
        nonMasterNode = nodes.get(0);
    }
    DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes();
    TransportService masterNodeTransportService =
        internalCluster().getInstance(TransportService.class, discoveryNodes.getMasterNode().getName());
    logger.info("blocking requests from non master [{}] to master [{}]", nonMasterNode, masterNode);
    MockTransportService nonMasterTransportService =
        (MockTransportService) internalCluster().getInstance(TransportService.class, nonMasterNode);
    nonMasterTransportService.addFailToSendNoConnectRule(masterNodeTransportService);
    assertNoMaster(nonMasterNode);
    logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode);
    MockTransportService masterTransportService =
        (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode);
    TransportService localTransportService =
        internalCluster().getInstance(TransportService.class, discoveryNodes.getLocalNode().getName());
    // Cluster state publishing is a two-phase protocol; randomly break either the
    // send phase or the commit phase so both failure modes are covered.
    if (randomBoolean()) {
        masterTransportService.addFailToSendNoConnectRule(localTransportService, PublishClusterStateAction.SEND_ACTION_NAME);
    } else {
        masterTransportService.addFailToSendNoConnectRule(localTransportService, PublishClusterStateAction.COMMIT_ACTION_NAME);
    }
    logger.info("allowing requests from non master [{}] to master [{}], waiting for two join requests", nonMasterNode, masterNode);
    final CountDownLatch countDownLatch = new CountDownLatch(2);
    // Replace the fail rule with a pass-through delegate that counts join requests.
    nonMasterTransportService.addDelegate(masterNodeTransportService,
        new MockTransportService.DelegateTransport(nonMasterTransportService.original()) {

        @Override
        protected void sendRequest(Connection connection, long requestId, String action,
                                   TransportRequest request, TransportRequestOptions options) throws IOException {
            if (action.equals(MembershipAction.DISCOVERY_JOIN_ACTION_NAME)) {
                countDownLatch.countDown();
            }
            super.sendRequest(connection, requestId, action, request, options);
        }
    });
    countDownLatch.await();
    logger.info("waiting for cluster to reform");
    masterTransportService.clearRule(localTransportService);
    nonMasterTransportService.clearRule(masterNodeTransportService);
    ensureStableCluster(2);
    // Shut down a node to avoid the leakage check tripping on the states
    // associated with the commit requests we may have dropped.
    internalCluster().stopRandomNonMasterNode();
}
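The disruption pattern above — install a no-connect rule, assert on the partitioned state, then restore traffic and let the cluster reform — is reusable on its own. Below is a minimal sketch of just that pattern, assuming the same ESIntegTestCase-style environment as the test above; the method name and node variables are hypothetical, and startCluster/ensureStableCluster are the test class's own helpers as used in the snippet.

public void testSketchBlockAndRestore() throws Exception {
    // Hypothetical test method; a sketch, not part of the elasticsearch sources.
    List<String> nodes = startCluster(2, 1);
    String master = internalCluster().getMasterName();
    String other = nodes.get(0).equals(master) ? nodes.get(1) : nodes.get(0);

    MockTransportService otherService =
        (MockTransportService) internalCluster().getInstance(TransportService.class, other);
    TransportService masterService =
        internalCluster().getInstance(TransportService.class, master);

    // Simulate a one-way partition: every send from `other` to `master`
    // now fails as if no connection could be established.
    otherService.addFailToSendNoConnectRule(masterService);

    // ... assertions while the partition is in place ...

    // Heal the partition and wait for the cluster to reform. The rule must be
    // cleared against the same TransportService it was registered for, since
    // rules are keyed by the target's transport address.
    otherService.clearRule(masterService);
    ensureStableCluster(2);
}

Note that rules are keyed by the target service's address, which is why the test above clears the non-master's rule against the master's transport service rather than its own.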
Use of org.elasticsearch.test.transport.MockTransportService in project elasticsearch by elastic.
The class IndicesStoreIntegrationIT, method testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted:
/* Tests that the shard is deleted if a shard-active request fails after relocation
   and the next incoming cluster state is an index delete. */
public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted() throws Exception {
    final String node_1 = internalCluster().startNode();
    logger.info("--> creating index [test] with one shard and no replica");
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(indexSettings())
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)));
    ensureGreen("test");
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    Index index = state.metaData().index("test").getIndex();
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
    final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build());
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false));
    // Add a transport delegate that prevents the first shard-active request from
    // succeeding after relocation has finished. node_1 will then wait for the next
    // cluster state change before attempting to delete the shard again.
    MockTransportService transportServiceNode_1 =
        (MockTransportService) internalCluster().getInstance(TransportService.class, node_1);
    TransportService transportServiceNode_2 = internalCluster().getInstance(TransportService.class, node_2);
    final CountDownLatch shardActiveRequestSent = new CountDownLatch(1);
    transportServiceNode_1.addDelegate(transportServiceNode_2,
        new MockTransportService.DelegateTransport(transportServiceNode_1.original()) {

        @Override
        protected void sendRequest(Connection connection, long requestId, String action,
                                   TransportRequest request, TransportRequestOptions options) throws IOException {
            if (action.equals("internal:index/shard/exists") && shardActiveRequestSent.getCount() > 0) {
                shardActiveRequestSent.countDown();
                logger.info("prevent shard active request from being sent");
                throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulated");
            }
            super.sendRequest(connection, requestId, action, request, options);
        }
    });
    logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2);
    internalCluster().client().admin().cluster().prepareReroute()
        .add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
    shardActiveRequestSent.await();
    ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForNoRelocatingShards(true).get();
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    logClusterState();
    // Delete the index. node_1, which is still waiting for the next cluster state
    // update, will then receive the index delete. It must still delete the shard,
    // even if it can no longer find it in the IndicesService.
    client().admin().indices().prepareDelete("test").get();
    assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false));
    assertThat(waitForIndexDeletion(node_1, index), equalTo(false));
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(false));
    assertThat(waitForShardDeletion(node_2, index, 0), equalTo(false));
    assertThat(waitForIndexDeletion(node_2, index), equalTo(false));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false));
}
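The assertions above lean on shardDirectory, indexDirectory, waitForShardDeletion, and waitForIndexDeletion, which the snippet does not show. A plausible sketch follows, assuming the path helpers resolve on-disk locations through NodeEnvironment and the wait helpers poll with awaitBusy; the actual helpers in IndicesStoreIntegrationIT may differ in detail.

// Sketch of the unshown helpers, assuming a single configured data path per node.
private Path indexDirectory(String server, Index index) {
    NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
    Path[] paths = env.indexPaths(index);  // one entry per configured data location
    assert paths.length == 1 : "test assumes a single data path";
    return paths[0];
}

private Path shardDirectory(String server, Index index, int shard) {
    NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
    Path[] paths = env.availableShardPaths(new ShardId(index, shard));
    assert paths.length == 1 : "test assumes a single data path";
    return paths[0];
}

// Poll until the shard directory disappears, then report whether it still exists,
// so the caller can assert equalTo(false) as the test above does.
private boolean waitForShardDeletion(String server, Index index, int shard) throws InterruptedException {
    awaitBusy(() -> !Files.exists(shardDirectory(server, index, shard)));
    return Files.exists(shardDirectory(server, index, shard));
}

private boolean waitForIndexDeletion(String server, Index index) throws InterruptedException {
    awaitBusy(() -> !Files.exists(indexDirectory(server, index)));
    return Files.exists(indexDirectory(server, index));
}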