Use of org.opensearch.cluster.routing.GroupShardsIterator in the OpenSearch project (opensearch-project).
Class SearchStatsIT, method numAssignedShards:
protected int numAssignedShards(String... indices) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
    return allAssignedShardsGrouped.size();
}
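The helper above counts shard groups via size(); GroupShardsIterator also exposes the individual ShardIterator groups through its Iterable interface. A minimal sketch of walking the routings themselves rather than the groups (the helper name countAssignedShardRoutings is ours, not from the project):

// Illustrative sketch: count individual shard routings instead of groups.
// GroupShardsIterator implements Iterable over its ShardIterator groups.
protected int countAssignedShardRoutings(String... indices) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator<ShardIterator> grouped = state.routingTable().allAssignedShardsGrouped(indices, true);
    int count = 0;
    for (ShardIterator group : grouped) {
        while (group.nextOrNull() != null) {    // drain each group's shard routings
            count++;
        }
    }
    return count;
}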
Use of org.opensearch.cluster.routing.GroupShardsIterator in the OpenSearch project (opensearch-project).
Class CorruptedFileIT, method numShards:
private int numShards(String... index) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false);
    return shardIterators.size();
}
Use of org.opensearch.cluster.routing.GroupShardsIterator in the OpenSearch project (opensearch-project).
Class CorruptedFileIT, method corruptRandomPrimaryFile:
private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    Index test = state.metadata().index("test").getIndex();
    GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[] { "test" }, false);
    List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
    ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators);
    ShardRouting shardRouting = shardIterator.nextOrNull();
    assertNotNull(shardRouting);
    assertTrue(shardRouting.primary());
    assertTrue(shardRouting.assignedToNode());
    String nodeId = shardRouting.currentNodeId();
    NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).addMetric(FS.metricName()).get();
    // treeset makes sure iteration order is deterministic
    Set<Path> files = new TreeSet<>();
    for (FsInfo.Path info : nodeStatses.getNodes().get(0).getFs()) {
        String path = info.getPath();
        Path file = PathUtils.get(path)
            .resolve("indices")
            .resolve(test.getUUID())
            .resolve(Integer.toString(shardRouting.getId()))
            .resolve("index");
        if (Files.exists(file)) {
            // multi data path might only have one path in use
            try (Directory dir = FSDirectory.open(file)) {
                SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
                if (includePerCommitFiles) {
                    files.add(file.resolve(segmentCommitInfos.getSegmentsFileName()));
                }
                for (SegmentCommitInfo commitInfo : segmentCommitInfos) {
                    if (commitInfo.getDelCount() + commitInfo.getSoftDelCount() == commitInfo.info.maxDoc()) {
                        // don't corrupt fully deleted segments - they might be removed on snapshot
                        continue;
                    }
                    for (String commitFile : commitInfo.files()) {
                        if (includePerCommitFiles || isPerSegmentFile(commitFile)) {
                            files.add(file.resolve(commitFile));
                        }
                    }
                }
            }
        }
    }
    CorruptionUtils.corruptFile(random(), files.toArray(new Path[0]));
    return shardRouting;
}
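The pattern above, grouping active primaries, materializing the groups into a list, and drawing one at random, can also be written without the test utilities (iterableAsArrayList, RandomPicks). A rough sketch, assuming a plain java.util.Random and an illustrative helper name randomActivePrimary:

// Sketch only: pick one active primary ShardRouting for an index at random.
private ShardRouting randomActivePrimary(ClusterState state, String index, Random random) {
    GroupShardsIterator<ShardIterator> grouped =
        state.getRoutingTable().activePrimaryShardsGrouped(new String[] { index }, false);
    List<ShardIterator> groups = new ArrayList<>();
    grouped.forEach(groups::add);                       // GroupShardsIterator is Iterable over its groups
    ShardIterator group = groups.get(random.nextInt(groups.size()));
    return group.nextOrNull();                          // first routing in the chosen group, or null if empty
}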
Use of org.opensearch.cluster.routing.GroupShardsIterator in the OpenSearch project (opensearch-project).
Class SliceBuilderTests, method testToFilterWithRouting:
public void testToFilterWithRouting() throws IOException {
    Directory dir = new ByteBuffersDirectory();
    try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
        writer.commit();
    }
    ClusterService clusterService = mock(ClusterService.class);
    ClusterState state = mock(ClusterState.class);
    when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA);
    when(clusterService.state()).thenReturn(state);
    OperationRouting routing = mock(OperationRouting.class);
    GroupShardsIterator<ShardIterator> it = new GroupShardsIterator<>(
        Collections.singletonList(new PlainShardIterator(new ShardId("index", "index", 1), Collections.emptyList()))
    );
    when(routing.searchShards(any(), any(), any(), any())).thenReturn(it);
    when(clusterService.operationRouting()).thenReturn(routing);
    when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
    try (IndexReader reader = DirectoryReader.open(dir)) {
        Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT);
        QueryShardContext context = createShardContext(version, reader, "field", DocValuesType.SORTED, 5, 0);
        SliceBuilder builder = new SliceBuilder("field", 6, 10);
        String[] routings = new String[] { "foo" };
        Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, version);
        assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
        query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, version);
        assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
    }
}
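As the mocked OperationRouting above shows, a GroupShardsIterator can be constructed directly from a list of ShardIterators. A small sketch building one with several empty single-shard groups for use as a test double (the index name, UUID, and shard count are illustrative):

// Sketch: build a GroupShardsIterator of three empty shard groups for a mock.
List<ShardIterator> groups = new ArrayList<>();
for (int shard = 0; shard < 3; shard++) {
    groups.add(new PlainShardIterator(new ShardId("index", "_na_", shard), Collections.emptyList()));
}
GroupShardsIterator<ShardIterator> grouped = new GroupShardsIterator<>(groups);
// grouped.size() == 3; each group's nextOrNull() returns null because its routing list is empty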
Use of org.opensearch.cluster.routing.GroupShardsIterator in the OpenSearch project (opensearch-project).
Class CanMatchPreFilterSearchPhaseTests, method testLotsOfShards:
/*
 * In cases where the query coordinating node holds all the shards for a query, the can-match phase could recurse and end in a
 * stack overflow when subjected to max concurrent search requests. This test covers that situation.
 */
public void testLotsOfShards() throws InterruptedException {
    final TransportSearchAction.SearchTimeProvider timeProvider =
        new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime);
    final Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
    final DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
    final DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT);
    lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
    lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
    final SearchTransportService searchTransportService = new SearchTransportService(null, null) {
        @Override
        public void sendCanMatch(
            Transport.Connection connection,
            ShardSearchRequest request,
            SearchTask task,
            ActionListener<SearchService.CanMatchResponse> listener
        ) {
            listener.onResponse(new SearchService.CanMatchResponse(randomBoolean(), null));
        }
    };
    final CountDownLatch latch = new CountDownLatch(1);
    final OriginalIndices originalIndices = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS);
    final GroupShardsIterator<SearchShardIterator> shardsIter =
        SearchAsyncActionTests.getShardsIter("idx", originalIndices, 4096, randomBoolean(), primaryNode, replicaNode);
    final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors()));
    final SearchRequest searchRequest = new SearchRequest();
    searchRequest.allowPartialSearchResults(true);
    SearchTransportService transportService = new SearchTransportService(null, null);
    ActionListener<SearchResponse> responseListener = ActionListener.wrap(response -> {
    }, (e) -> {
        throw new AssertionError("unexpected", e);
    });
    Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
    final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(
        logger,
        searchTransportService,
        (clusterAlias, node) -> lookup.get(node),
        Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
        Collections.emptyMap(),
        Collections.emptyMap(),
        OpenSearchExecutors.newDirectExecutorService(),
        searchRequest,
        null,
        shardsIter,
        timeProvider,
        ClusterState.EMPTY_STATE,
        null,
        (iter) -> new AbstractSearchAsyncAction<SearchPhaseResult>("test", logger, transportService, (cluster, node) -> {
            assert cluster == null : "cluster was not null: " + cluster;
            return lookup.get(node);
        },
            aliasFilters,
            Collections.emptyMap(),
            Collections.emptyMap(),
            executor,
            searchRequest,
            responseListener,
            iter,
            new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0),
            ClusterState.EMPTY_STATE,
            null,
            new ArraySearchPhaseResults<>(iter.size()),
            randomIntBetween(1, 32),
            SearchResponse.Clusters.EMPTY
        ) {
            @Override
            protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
                return new SearchPhase("test") {
                    @Override
                    public void run() {
                        latch.countDown();
                    }
                };
            }

            @Override
            protected void executePhaseOnShard(
                final SearchShardIterator shardIt,
                final SearchShardTarget shard,
                final SearchActionListener<SearchPhaseResult> listener
            ) {
                if (randomBoolean()) {
                    listener.onResponse(new SearchPhaseResult() {
                    });
                } else {
                    listener.onFailure(new Exception("failure"));
                }
            }
        },
        SearchResponse.Clusters.EMPTY
    );
    canMatchPhase.start();
    latch.await();
    executor.shutdown();
}