use of org.elasticsearch.transport.TransportService in project elasticsearch by elastic.
the class IndicesStoreIntegrationIT method testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted.
/* Test that the shard is deleted when the ShardActiveRequest fails after relocation and the next incoming cluster state is an index delete. */
public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted() throws Exception {
    final String node_1 = internalCluster().startNode();
    logger.info("--> creating index [test] with one shard and no replicas");
    assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)));
    ensureGreen("test");
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    Index index = state.metaData().index("test").getIndex();
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
    final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build());
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false));
    // add a transport delegate that prevents the shard active request from succeeding the first time after relocation has finished.
    // node_1 will then wait for the next cluster state change before it makes another attempt to delete the shard.
    MockTransportService transportServiceNode_1 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_1);
    TransportService transportServiceNode_2 = internalCluster().getInstance(TransportService.class, node_2);
    final CountDownLatch shardActiveRequestSent = new CountDownLatch(1);
    transportServiceNode_1.addDelegate(transportServiceNode_2, new MockTransportService.DelegateTransport(transportServiceNode_1.original()) {
        @Override
        protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
            if (action.equals("internal:index/shard/exists") && shardActiveRequestSent.getCount() > 0) {
                shardActiveRequestSent.countDown();
                logger.info("prevent shard active request from being sent");
                throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulated");
            }
            super.sendRequest(connection, requestId, action, request, options);
        }
    });
logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2);
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
shardActiveRequestSent.await();
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
logClusterState();
    // delete the index. node_1, which is still waiting for the next cluster state update, will then receive the index deletion next.
    // it must still delete the shard, even if it can no longer find it in the IndicesService
    client().admin().indices().prepareDelete("test").get();
    assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false));
    assertThat(waitForIndexDeletion(node_1, index), equalTo(false));
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(false));
    assertThat(waitForShardDeletion(node_2, index, 0), equalTo(false));
    assertThat(waitForIndexDeletion(node_2, index), equalTo(false));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false));
}
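The path and wait helpers referenced above (shardDirectory, indexDirectory, waitForShardDeletion, waitForIndexDeletion) are not part of this excerpt. A minimal sketch of what they could look like, assuming NodeEnvironment is used to resolve the on-disk shard path and a plain polling loop stands in for the test framework's wait utilities; the index-level variants would follow the same pattern via NodeEnvironment.indexPaths. The names, signatures, and polling strategy are assumptions, not the actual implementation from IndicesStoreIntegrationIT.
    // Assumed imports: java.nio.file.Files, java.nio.file.Path, org.elasticsearch.env.NodeEnvironment, org.elasticsearch.index.shard.ShardId
    private Path shardDirectory(String nodeName, Index index, int shardId) {
        // resolve the shard's data directory through the node's NodeEnvironment
        NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, nodeName);
        Path[] paths = env.availableShardPaths(new ShardId(index, shardId));
        assert paths.length == 1;
        return paths[0];
    }

    private boolean waitForShardDeletion(String nodeName, Index index, int shardId) throws InterruptedException {
        // poll for up to ~10 seconds, then report whether the shard directory still exists
        long deadline = System.currentTimeMillis() + 10_000;
        while (Files.exists(shardDirectory(nodeName, index, shardId)) && System.currentTimeMillis() < deadline) {
            Thread.sleep(100);
        }
        return Files.exists(shardDirectory(nodeName, index, shardId));
    }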
use of org.elasticsearch.transport.TransportService in project elasticsearch by elastic.
the class TransportInstanceSingleOperationActionTests method setUp.
@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    transport = new CapturingTransport();
    clusterService = createClusterService(THREAD_POOL);
    transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null);
    transportService.start();
    transportService.acceptIncomingRequests();
    action = new TestTransportInstanceSingleOperationAction(Settings.EMPTY, "indices:admin/test", transportService, new ActionFilters(new HashSet<ActionFilter>()), new MyResolver(), Request::new);
}
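The matching cleanup is not shown in this excerpt. A plausible sketch, assuming the thread pool lives in a static THREAD_POOL field and the usual close ordering for the cluster and transport services applies; this is an assumption about the surrounding test class, not code from the original.
    @Override
    @After
    public void tearDown() throws Exception {
        super.tearDown();
        clusterService.close();
        transportService.close();
    }

    @AfterClass
    public static void destroyThreadPool() {
        // terminate the shared thread pool once all tests in the class have run
        ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS);
        THREAD_POOL = null;
    }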
use of org.elasticsearch.transport.TransportService in project elasticsearch by elastic.
the class InternalTestCluster method reset.
private synchronized void reset(boolean wipeData) throws IOException {
    // clear all rules for mock transport services
    for (NodeAndClient nodeAndClient : nodes.values()) {
        TransportService transportService = nodeAndClient.node.injector().getInstance(TransportService.class);
        if (transportService instanceof MockTransportService) {
            final MockTransportService mockTransportService = (MockTransportService) transportService;
            mockTransportService.clearAllRules();
            mockTransportService.clearTracers();
        }
    }
    randomlyResetClients();
    final int newSize = sharedNodesSeeds.length;
    if (nextNodeId.get() == newSize && nodes.size() == newSize) {
        if (wipeData) {
            wipePendingDataDirectories();
        }
        if (nodes.size() > 0 && autoManageMinMasterNodes) {
            updateMinMasterNodes(getMasterNodesCount());
        }
        logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize);
        return;
    }
    logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize);
    // trash all nodes with id >= sharedNodesSeeds.length - they are not shared
    final List<NodeAndClient> toClose = new ArrayList<>();
    for (Iterator<NodeAndClient> iterator = nodes.values().iterator(); iterator.hasNext(); ) {
        NodeAndClient nodeAndClient = iterator.next();
        if (nodeAndClient.nodeAndClientId() >= sharedNodesSeeds.length) {
            logger.debug("Close Node [{}] not shared", nodeAndClient.name);
            toClose.add(nodeAndClient);
        }
    }
    stopNodesAndClients(toClose);
    // clean up anything the stopped nodes left behind that is no longer used
    if (wipeData) {
        wipePendingDataDirectories();
    }
    // start any missing node
    assert newSize == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes;
    final int numberOfMasterNodes = numSharedDedicatedMasterNodes > 0 ? numSharedDedicatedMasterNodes : numSharedDataNodes;
    final int defaultMinMasterNodes = (numberOfMasterNodes / 2) + 1;
    // we want to start nodes in one go due to min master nodes
    final List<NodeAndClient> toStartAndPublish = new ArrayList<>();
    for (int i = 0; i < numSharedDedicatedMasterNodes; i++) {
        final Settings.Builder settings = Settings.builder();
        settings.put(Node.NODE_MASTER_SETTING.getKey(), true);
        settings.put(Node.NODE_DATA_SETTING.getKey(), false);
        NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
        toStartAndPublish.add(nodeAndClient);
    }
    for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) {
        final Settings.Builder settings = Settings.builder();
        if (numSharedDedicatedMasterNodes > 0) {
            // with dedicated master nodes present, make the shared data nodes data-only (not master-eligible)
            settings.put(Node.NODE_MASTER_SETTING.getKey(), false);
            settings.put(Node.NODE_DATA_SETTING.getKey(), true);
        }
        NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
        toStartAndPublish.add(nodeAndClient);
    }
    for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
        final Builder settings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false);
        NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
        toStartAndPublish.add(nodeAndClient);
    }
    startAndPublishNodesAndClients(toStartAndPublish);
    nextNodeId.set(newSize);
    assert size() == newSize;
    if (newSize > 0) {
        validateClusterFormed();
    }
    logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize);
}
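defaultMinMasterNodes above follows the usual quorum rule: for N master-eligible nodes, minimum_master_nodes is N/2 + 1 with integer division, e.g. 2 for a cluster of 3 master-eligible nodes. A minimal sketch of what the updateMinMasterNodes helper could do, assuming it simply publishes discovery.zen.minimum_master_nodes through a cluster settings update; the real InternalTestCluster helper may work differently.
    // Illustrative assumption, not the actual InternalTestCluster implementation.
    private void updateMinMasterNodes(int masterEligibleNodes) {
        int minMasterNodes = (masterEligibleNodes / 2) + 1;
        client().admin().cluster().prepareUpdateSettings()
            .setPersistentSettings(Settings.builder()
                .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes)
                .build())
            .get();
    }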
use of org.elasticsearch.transport.TransportService in project elasticsearch by elastic.
the class ClusterStateHealthTests method setUp.
@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    clusterService = createClusterService(threadPool);
    transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null);
    transportService.start();
    transportService.acceptIncomingRequests();
}
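Once the transport service is started, a test like this can derive health directly from a cluster state. A short usage sketch, assuming ClusterStateHealth offers a constructor taking a ClusterState and that a freshly created cluster state with no indices reports GREEN; the concrete assertions are illustrative, not taken from ClusterStateHealthTests.
    // Illustrative usage of the class under test; assertions are assumptions for a cluster state with no indices.
    ClusterState clusterState = clusterService.state();
    ClusterStateHealth health = new ClusterStateHealth(clusterState);
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(health.getNumberOfNodes(), equalTo(clusterState.nodes().getSize()));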
use of org.elasticsearch.transport.TransportService in project elasticsearch by elastic.
the class DynamicMappingDisabledTests method setUp.
@Override
public void setUp() throws Exception {
    super.setUp();
    Settings settings = Settings.builder().put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false).build();
    clusterService = createClusterService(threadPool);
    Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(settings, Collections.emptyList()));
    transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null);
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    ShardStateAction shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, threadPool);
    ActionFilters actionFilters = new ActionFilters(Collections.emptySet());
    IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
    AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), indexNameExpressionResolver);
    UpdateHelper updateHelper = new UpdateHelper(settings, null);
    TransportShardBulkAction shardBulkAction = new TransportShardBulkAction(settings, transportService, clusterService, indicesService, threadPool, shardStateAction, null, updateHelper, actionFilters, indexNameExpressionResolver);
    transportBulkAction = new TransportBulkAction(settings, threadPool, transportService, clusterService, null, shardBulkAction, null, actionFilters, indexNameExpressionResolver, autoCreateIndex, System::currentTimeMillis);
}
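With dynamic mapping disabled and no matching index, a test built on this setup would typically push a bulk request through transportBulkAction and expect it to fail rather than create a mapping on the fly. A minimal sketch under that assumption; the index name, field, and exact failure handling are illustrative, not taken from DynamicMappingDisabledTests.
    // Hypothetical usage - the request content and the failure expectation are assumptions.
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(new IndexRequest("index", "type", "1").source("field", "value"));
    AtomicBoolean failed = new AtomicBoolean();
    transportBulkAction.execute(bulkRequest, new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse bulkResponse) {
            fail("the bulk request should not succeed while dynamic mapping is disabled");
        }

        @Override
        public void onFailure(Exception e) {
            failed.set(true);
        }
    });
    assertBusy(() -> assertTrue(failed.get()));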