Use of org.elasticsearch.cluster.service.ClusterService in project elasticsearch by elastic.
From the class MetaDataIndexTemplateServiceTests, method putTemplateDetail:
private List<Throwable> putTemplateDetail(PutRequest request) throws Exception {
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    ClusterService clusterService = getInstanceFromNode(ClusterService.class);
    MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(Settings.EMPTY, clusterService, indicesService,
            null, null, null, null, null, xContentRegistry());
    MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, clusterService, createIndexService,
            new AliasValidator(Settings.EMPTY), indicesService,
            new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS), xContentRegistry());
    final List<Throwable> throwables = new ArrayList<>();
    final CountDownLatch latch = new CountDownLatch(1);
    service.putTemplate(request, new MetaDataIndexTemplateService.PutListener() {

        @Override
        public void onResponse(MetaDataIndexTemplateService.PutResponse response) {
            latch.countDown();
        }

        @Override
        public void onFailure(Exception e) {
            throwables.add(e);
            latch.countDown();
        }
    });
    latch.await();
    return throwables;
}
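A minimal sketch of how a test might call this helper, assuming the PutRequest(cause, name) constructor used elsewhere in the class; the request values and the assertion are illustrative, not taken from the original test:

// hypothetical call site: submit an intentionally incomplete template request and
// assert that the failure is reported through the returned list rather than thrown
PutRequest request = new PutRequest("api", "broken_template");  // cause, template name (illustrative values)
List<Throwable> errors = putTemplateDetail(request);
assertFalse(errors.isEmpty());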
Use of org.elasticsearch.cluster.service.ClusterService in project elasticsearch by elastic.
From the class OperationRoutingTests, method testPreferNodes:
public void testPreferNodes() throws InterruptedException, IOException {
    TestThreadPool threadPool = null;
    ClusterService clusterService = null;
    try {
        threadPool = new TestThreadPool("testPreferNodes");
        clusterService = ClusterServiceUtils.createClusterService(threadPool);
        final String indexName = "test";
        ClusterServiceUtils.setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(indexName, true, randomInt(8)));
        final Index index = clusterService.state().metaData().index(indexName).getIndex();
        final List<ShardRouting> shards = clusterService.state().getRoutingNodes().assignedShards(new ShardId(index, 0));
        final int count = randomIntBetween(1, shards.size());
        int position = 0;
        final List<String> nodes = new ArrayList<>();
        final List<ShardRouting> expected = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            if (randomBoolean() && !shards.get(position).initializing()) {
                nodes.add(shards.get(position).currentNodeId());
                expected.add(shards.get(position));
                position++;
            } else {
                nodes.add("missing_" + i);
            }
        }
        final ShardIterator it = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))
                .getShards(clusterService.state(), indexName, 0, "_prefer_nodes:" + String.join(",", nodes));
        final List<ShardRouting> all = new ArrayList<>();
        ShardRouting shard;
        while ((shard = it.nextOrNull()) != null) {
            all.add(shard);
        }
        final Set<ShardRouting> preferred = new HashSet<>();
        preferred.addAll(all.subList(0, expected.size()));
        // the preferred shards should be at the front of the list
        assertThat(preferred, containsInAnyOrder(expected.toArray()));
        // verify all the shards are there
        assertThat(all.size(), equalTo(shards.size()));
    } finally {
        IOUtils.close(clusterService);
        terminate(threadPool);
    }
}
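For context, the same preference string the test builds server-side can be supplied from a client; a minimal sketch, assuming an index named "test" and made-up node ids:

// route the search to the given nodes when they hold a copy of the shard,
// falling back to the remaining copies otherwise (node ids are illustrative)
SearchResponse response = client().prepareSearch("test")
        .setPreference("_prefer_nodes:nodeId1,nodeId2")
        .setQuery(matchAllQuery())
        .get();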
Use of org.elasticsearch.cluster.service.ClusterService in project elasticsearch by elastic.
From the class RecoveryWhileUnderLoadIT, method iterateAssertCount:
private void iterateAssertCount(final int numberOfShards, final int iterations, final Set<String> ids) throws Exception {
    final long numberOfDocs = ids.size();
    SearchResponse[] iterationResults = new SearchResponse[iterations];
    boolean error = false;
    for (int i = 0; i < iterations; i++) {
        SearchResponse searchResponse = client().prepareSearch().setSize((int) numberOfDocs).setQuery(matchAllQuery())
                .addSort("id", SortOrder.ASC).get();
        logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse);
        iterationResults[i] = searchResponse;
        if (searchResponse.getHits().getTotalHits() != numberOfDocs) {
            error = true;
        }
    }
    if (error) {
        // print out the shards and their doc counts
        IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().get();
        for (ShardStats shardStats : indicesStatsResponse.getShards()) {
            DocsStats docsStats = shardStats.getStats().docs;
            logger.info("shard [{}] - count {}, primary {}", shardStats.getShardRouting().id(), docsStats.getCount(), shardStats.getShardRouting().primary());
        }
        ClusterService clusterService = clusterService();
        final ClusterState state = clusterService.state();
        for (int shard = 0; shard < numberOfShards; shard++) {
            for (String id : ids) {
                ShardId docShard = clusterService.operationRouting().shardId(state, "test", id, null);
                if (docShard.id() == shard) {
                    for (ShardRouting shardRouting : state.routingTable().shardRoutingTable("test", shard)) {
                        GetResponse response = client().prepareGet("test", "type", id)
                                .setPreference("_only_nodes:" + shardRouting.currentNodeId()).get();
                        // every copy of the shard should hold the document; log any copy where it is missing
                        if (response.isExists() == false) {
                            logger.info("missing id [{}] on shard {}", id, shardRouting);
                        }
                    }
                }
            }
        }
        // if there was an error, wait and see whether it gets fixed at some point
        logger.info("--> trying to wait");
        assertTrue(awaitBusy(() -> {
            boolean errorOccurred = false;
            for (int i = 0; i < iterations; i++) {
                SearchResponse searchResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
                if (searchResponse.getHits().getTotalHits() != numberOfDocs) {
                    errorOccurred = true;
                }
            }
            return !errorOccurred;
        }, 5, TimeUnit.MINUTES));
        assertEquals(numberOfDocs, ids.size());
    }
    // now make the test fail if it was supposed to fail
    for (int i = 0; i < iterations; i++) {
        assertHitCount(iterationResults[i], numberOfDocs);
    }
}
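A sketch of how such a helper would typically be invoked from the test body; the variable names are illustrative and not part of the original method:

// after indexing while recoveries are running, verify the hit count several times in a row
refresh();
iterateAssertCount(numberOfShards, 10, indexedIds);  // indexedIds: the document ids the test indexed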
Use of org.elasticsearch.cluster.service.ClusterService in project elasticsearch by elastic.
From the class RelocationIT, method testCancellationCleansTempFiles:
public void testCancellationCleansTempFiles() throws Exception {
    final String indexName = "test";
    final String p_node = internalCluster().startNode();
    prepareCreate(indexName, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get();
    internalCluster().startNode();
    internalCluster().startNode();
    List<IndexRequestBuilder> requests = new ArrayList<>();
    int numDocs = scaledRandomIntBetween(25, 250);
    for (int i = 0; i < numDocs; i++) {
        requests.add(client().prepareIndex(indexName, "type").setSource("{}", XContentType.JSON));
    }
    indexRandom(true, requests);
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut());
    flush();
    int allowedFailures = randomIntBetween(3, 10);
    logger.info("--> blocking recoveries from primary (allowed failures: [{}])", allowedFailures);
    CountDownLatch corruptionCount = new CountDownLatch(allowedFailures);
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class, p_node);
    MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, p_node);
    for (DiscoveryNode node : clusterService.state().nodes()) {
        if (!node.equals(clusterService.localNode())) {
            mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, node.getName()),
                    new RecoveryCorruption(mockTransportService.original(), corruptionCount));
        }
    }
    client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
    corruptionCount.await();
    logger.info("--> stopping replica assignment");
    assertAcked(client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
    logger.info("--> wait for all replica shards to be removed, on all nodes");
    assertBusy(() -> {
        for (String node : internalCluster().getNodeNames()) {
            if (node.equals(p_node)) {
                continue;
            }
            ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState();
            assertThat(node + " indicates assigned replicas",
                    state.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
        }
    });
    logger.info("--> verifying no temporary recoveries are left");
    for (String node : internalCluster().getNodeNames()) {
        NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, node);
        for (final Path shardLoc : nodeEnvironment.availableShardPaths(new ShardId(indexName, "_na_", 0))) {
            if (Files.exists(shardLoc)) {
                assertBusy(() -> {
                    try {
                        Files.walkFileTree(shardLoc, new SimpleFileVisitor<Path>() {
                            @Override
                            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                                assertThat("found a temporary recovery file: " + file, file.getFileName().toString(), not(startsWith("recovery.")));
                                return FileVisitResult.CONTINUE;
                            }
                        });
                    } catch (IOException e) {
                        throw new AssertionError("failed to walk file tree starting at [" + shardLoc + "]", e);
                    }
                });
            }
        }
    }
}
Use of org.elasticsearch.cluster.service.ClusterService in project elasticsearch by elastic.
From the class TransportMultiSearchActionTests, method testBatchExecute:
public void testBatchExecute() throws Exception {
    // Initialize dependencies of TransportMultiSearchAction
    Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build();
    ActionFilters actionFilters = mock(ActionFilters.class);
    when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
    ThreadPool threadPool = new ThreadPool(settings);
    TaskManager taskManager = mock(TaskManager.class);
    TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
            boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null) {
        @Override
        public TaskManager getTaskManager() {
            return taskManager;
        }
    };
    ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build());
    IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
    // Keep track of the number of concurrent searches started by the multi search api;
    // if more searches run at once than allowed, record an assertion error.
    int maxAllowedConcurrentSearches = scaledRandomIntBetween(1, 16);
    AtomicInteger counter = new AtomicInteger();
    AtomicReference<AssertionError> errorHolder = new AtomicReference<>();
    // randomize whether or not requests are executed asynchronously
    final List<String> threadPoolNames = Arrays.asList(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME);
    Randomness.shuffle(threadPoolNames);
    final ExecutorService commonExecutor = threadPool.executor(threadPoolNames.get(0));
    final ExecutorService rarelyExecutor = threadPool.executor(threadPoolNames.get(1));
    final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
    TransportAction<SearchRequest, SearchResponse> searchAction =
            new TransportAction<SearchRequest, SearchResponse>(Settings.EMPTY, "action", threadPool, actionFilters, resolver, taskManager) {
        @Override
        protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
            requests.add(request);
            int currentConcurrentSearches = counter.incrementAndGet();
            if (currentConcurrentSearches > maxAllowedConcurrentSearches) {
                errorHolder.set(new AssertionError("Current concurrent search [" + currentConcurrentSearches
                        + "] is higher than is allowed [" + maxAllowedConcurrentSearches + "]"));
            }
            final ExecutorService executorService = rarely() ? rarelyExecutor : commonExecutor;
            executorService.execute(() -> {
                counter.decrementAndGet();
                listener.onResponse(new SearchResponse());
            });
        }
    };
    TransportMultiSearchAction action = new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, 10);
    // Execute the multi search api and fail if we find an error after executing:
    try {
        /*
         * Allow for a large number of search requests in a single batch, as previous implementations
         * could stack overflow when the number of requests in a single batch was large.
         */
        int numSearchRequests = scaledRandomIntBetween(1, 8192);
        MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
        multiSearchRequest.maxConcurrentSearchRequests(maxAllowedConcurrentSearches);
        for (int i = 0; i < numSearchRequests; i++) {
            multiSearchRequest.add(new SearchRequest());
        }
        MultiSearchResponse response = action.execute(multiSearchRequest).actionGet();
        assertThat(response.getResponses().length, equalTo(numSearchRequests));
        assertThat(requests.size(), equalTo(numSearchRequests));
        assertThat(errorHolder.get(), nullValue());
    } finally {
        assertTrue(ESTestCase.terminate(threadPool));
    }
}
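Outside of this unit test, the same batching limit is set on the request itself before it is sent through a client; a minimal sketch with illustrative index names:

MultiSearchRequest msearch = new MultiSearchRequest();
msearch.maxConcurrentSearchRequests(4);               // cap how many searches of this batch run in parallel
msearch.add(new SearchRequest("index-a"));
msearch.add(new SearchRequest("index-b"));
MultiSearchResponse responses = client().multiSearch(msearch).actionGet();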