Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
In the class DfsQueryPhaseTests, the method testFailPhaseOnException:
public void testFailPhaseOnException() throws IOException {
    AtomicArray<DfsSearchResult> results = new AtomicArray<>(2);
    AtomicReference<AtomicArray<QuerySearchResultProvider>> responseRef = new AtomicReference<>();
    results.set(0, new DfsSearchResult(1, new SearchShardTarget("node1", new Index("test", "na"), 0)));
    results.set(1, new DfsSearchResult(2, new SearchShardTarget("node2", new Index("test", "na"), 0)));
    results.get(0).termsStatistics(new Term[0], new TermStatistics[0]);
    results.get(1).termsStatistics(new Term[0], new TermStatistics[0]);
    SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
    SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
        @Override
        public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,
                ActionListener<QuerySearchResult> listener) {
            if (request.id() == 1) {
                QuerySearchResult queryResult = new QuerySearchResult(123,
                        new SearchShardTarget("node1", new Index("test", "na"), 0));
                queryResult.topDocs(new TopDocs(1, new ScoreDoc[] { new ScoreDoc(42, 1.0F) }, 2.0F), new DocValueFormat[0]);
                // the size of the result set
                queryResult.size(2);
                listener.onResponse(queryResult);
            } else if (request.id() == 2) {
                throw new UncheckedIOException(new MockDirectoryWrapper.FakeIOException());
            } else {
                fail("no such request ID: " + request.id());
            }
        }
    };
    MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
    mockSearchPhaseContext.searchTransport = searchTransportService;
    DfsQueryPhase phase = new DfsQueryPhase(results, controller, (response) -> new SearchPhase("test") {
        @Override
        public void run() throws IOException {
            responseRef.set(response.results);
        }
    }, mockSearchPhaseContext);
    assertEquals("dfs_query", phase.getName());
    expectThrows(UncheckedIOException.class, () -> phase.run());
    // the exception escaped the phase, so the phase itself released no search contexts;
    // cleanup of the contexts is left to the surrounding phase execution
    assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty());
}
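The snippets here construct Index instances directly from a name and a UUID. As a reminder of why both arguments matter, a minimal sketch (not part of the test; it relies on the equals contract of org.elasticsearch.index.Index, which compares both fields):

Index a = new Index("test", "na");
Index b = new Index("test", "some-other-uuid");
assert a.equals(new Index("test", "na")); // same name and UUID: equal
assert a.equals(b) == false;              // same name, different UUID: not equal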
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
In the class ShardSearchFailureTests, the method testToXContent:
public void testToXContent() throws IOException {
    ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(0, 0, "some message", null),
            new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123)));
    BytesReference xContent = toXContent(failure, XContentType.JSON, randomBoolean());
    assertEquals("{\"shard\":123,"
            + "\"index\":\"indexName\","
            + "\"node\":\"nodeId\","
            + "\"reason\":{"
            + "\"type\":\"parsing_exception\","
            + "\"reason\":\"some message\","
            + "\"line\":0,"
            + "\"col\":0"
            + "}"
            + "}", xContent.utf8ToString());
}
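The "shard" and "index" fields in the expected JSON are taken from the ShardId that wraps the Index. A minimal sketch of that mapping (assuming the 5.x ShardId accessors):

ShardId shardId = new ShardId(new Index("indexName", "indexUuid"), 123);
assert shardId.getIndexName().equals("indexName"); // rendered as "index"
assert shardId.id() == 123;                        // rendered as "shard"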
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
In the class ShardFailedClusterStateTaskExecutorTests, the method testIllegalShardFailureRequests:
public void testIllegalShardFailureRequests() throws Exception {
    String reason = "test illegal shard failure requests";
    ClusterState currentState = createClusterStateWithStartedShards(reason);
    List<ShardStateAction.ShardEntry> failingTasks = createExistingShards(currentState, reason);
    List<ShardStateAction.ShardEntry> tasks = new ArrayList<>();
    for (ShardStateAction.ShardEntry failingTask : failingTasks) {
        long primaryTerm = currentState.metaData().index(failingTask.shardId.getIndex()).primaryTerm(failingTask.shardId.id());
        tasks.add(new ShardStateAction.ShardEntry(failingTask.shardId, failingTask.allocationId,
                randomIntBetween(1, (int) primaryTerm - 1), failingTask.message, failingTask.failure));
    }
    Map<ShardStateAction.ShardEntry, ClusterStateTaskExecutor.TaskResult> taskResultMap = tasks.stream()
            .collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.failure(
                    new ShardStateAction.NoLongerPrimaryShardException(task.shardId,
                            "primary term [" + task.primaryTerm + "] did not match current primary term ["
                                    + currentState.metaData().index(task.shardId.getIndex()).primaryTerm(task.shardId.id()) + "]"))));
    ClusterStateTaskExecutor.ClusterTasksResult<ShardStateAction.ShardEntry> result = executor.execute(currentState, tasks);
    assertTaskResults(taskResultMap, result, currentState, false);
}
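Each rewritten task carries randomIntBetween(1, (int) primaryTerm - 1), i.e. a primary term strictly below the current one, which is exactly the condition NoLongerPrimaryShardException reports. Note that the current term is resolved through the MetaData#index(Index) overload rather than by name; a sketch of what that lookup implies (an assumption based on the usual implementation, which resolves by name and then verifies the UUID):

IndexMetaData indexMetaData = currentState.metaData().index(failingTask.shardId.getIndex());
// returns null when the name matches but the UUID does not, so a stale Index
// reference can never resolve to the wrong generation of the index
long currentPrimaryTerm = indexMetaData.primaryTerm(failingTask.shardId.id());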
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
In the class ShardFailedClusterStateTaskExecutorTests, the method createNonExistentShards:
private List<ShardStateAction.ShardEntry> createNonExistentShards(ClusterState currentState, String reason) {
    // add shards from a non-existent index
    String nonExistentIndexUUID = "non-existent";
    Index index = new Index("non-existent", nonExistentIndexUUID);
    List<String> nodeIds = new ArrayList<>();
    for (ObjectCursor<String> nodeId : currentState.nodes().getNodes().keys()) {
        // use the cursor's value; ObjectCursor#toString yields the cursor's debug string, not the node id
        nodeIds.add(nodeId.value);
    }
    List<ShardRouting> nonExistentShards = new ArrayList<>();
    nonExistentShards.add(nonExistentShardRouting(index, nodeIds, true));
    for (int i = 0; i < numberOfReplicas; i++) {
        nonExistentShards.add(nonExistentShardRouting(index, nodeIds, false));
    }
    List<ShardStateAction.ShardEntry> existingShards = createExistingShards(currentState, reason);
    List<ShardStateAction.ShardEntry> shardsWithMismatchedAllocationIds = new ArrayList<>();
    for (ShardStateAction.ShardEntry existingShard : existingShards) {
        shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardEntry(existingShard.shardId,
                UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure));
    }
    List<ShardStateAction.ShardEntry> tasks = new ArrayList<>();
    nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.ShardEntry(shard.shardId(),
            shard.allocationId().getId(), 0L, reason, new CorruptIndexException("simulated", nonExistentIndexUUID))));
    tasks.addAll(shardsWithMismatchedAllocationIds);
    return tasks;
}
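The helper nonExistentShardRouting is called above but not included in this excerpt. A plausible sketch using the TestShardRouting utility from the Elasticsearch test framework (the actual body in the repository may differ):

private ShardRouting nonExistentShardRouting(Index index, List<String> nodeIds, boolean primary) {
    // a started shard on a random live node, belonging to an index that is absent from the cluster state
    return TestShardRouting.newShardRouting(new ShardId(index, 0), randomFrom(nodeIds), primary, ShardRoutingState.STARTED);
}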
Use of org.elasticsearch.index.Index in project elasticsearch by elastic.
In the class ClusterRerouteIT, the method rerouteWithAllocateLocalGateway:
private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
    logger.info("--> starting 2 nodes");
    String node_1 = internalCluster().startNode(commonSettings);
    internalCluster().startNode(commonSettings);
    assertThat(cluster().size(), equalTo(2));
    ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));
    logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
    client().admin().indices().prepareCreate("test")
            .setWaitForActiveShards(ActiveShardCount.NONE)
            .setSettings(Settings.builder().put("index.number_of_shards", 1))
            .execute().actionGet();
    ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
    logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
    state = client().admin().cluster().prepareReroute().setExplain(randomBoolean())
            .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
            .execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
    assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.INITIALIZING));
    healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));
    logger.info("--> get the state, verify shard 1 primary allocated");
    state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
    assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.STARTED));
    client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
    final Index index = resolveIndex("test");
    logger.info("--> closing all nodes");
    Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).availableShardPaths(new ShardId(index, 0));
    // make sure the data is there!
    assertThat(FileSystemUtils.exists(shardLocation), equalTo(true));
    // don't wipe the data directories; the index needs to be there!
    internalCluster().closeNonSharedNodes(false);
    logger.info("--> deleting the shard data [{}]", Arrays.toString(shardLocation));
    // verify again after the cluster was shut down
    assertThat(FileSystemUtils.exists(shardLocation), equalTo(true));
    IOUtils.rm(shardLocation);
    logger.info("--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
    node_1 = internalCluster().startNode(commonSettings);
    internalCluster().startNode(commonSettings);
    // wait a bit for the cluster to realize that the shard is not there...
    // TODO can we get around this? the cluster is RED, so what do we wait for?
    client().admin().cluster().prepareReroute().get();
    assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
    logger.info("--> explicitly allocate primary");
    state = client().admin().cluster().prepareReroute().setExplain(randomBoolean())
            .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
            .execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
    assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.INITIALIZING));
    logger.info("--> get the state, verify shard 1 primary allocated");
    final String nodeToCheck = node_1;
    assertBusy(() -> {
        ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
        String nodeId = clusterState.nodes().resolveNode(nodeToCheck).getId();
        assertThat(clusterState.getRoutingNodes().node(nodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
    });
}
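resolveIndex("test") above turns the index name into a concrete Index that carries the real UUID, which availableShardPaths needs in order to locate the shard directories on disk. A sketch of how such a helper can look (assuming the ESIntegTestCase approach of reading the UUID back from the index settings):

protected Index resolveIndex(String index) {
    GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get();
    String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID);
    return new Index(index, uuid);
}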