Use of io.crate.action.job.SharedShardContexts in project crate by crate.
In the class FetchContextTest, the method testSearcherIsAcquiredForShard:
@Test
public void testSearcherIsAcquiredForShard() throws Exception {
    // routing: node "dummy" holds shards 1 and 2 of index "i1"
    Routing routing = new Routing(
        TreeMapBuilder.<String, Map<String, List<Integer>>>newMapBuilder()
            .put("dummy", TreeMapBuilder.<String, List<Integer>>newMapBuilder()
                .put("i1", ImmutableList.of(1, 2)).map())
            .map());
    IndexBaseVisitor ibv = new IndexBaseVisitor();
    routing.walkLocations(ibv);
    HashMultimap<TableIdent, String> tableIndices = HashMultimap.create();
    tableIndices.put(new TableIdent(null, "i1"), "i1");
    final FetchContext context = new FetchContext(
        new FetchPhase(1, null, ibv.build(), tableIndices,
            ImmutableList.of(createReference("i1", new ColumnIdent("x"), DataTypes.STRING))),
        "dummy",
        new SharedShardContexts(mock(IndicesService.class, RETURNS_MOCKS)),
        ImmutableList.of(routing));
    context.prepare();
    // a searcher must have been acquired for each routed shard
    assertThat(context.searcher(1), Matchers.notNullValue());
    assertThat(context.searcher(2), Matchers.notNullValue());
}
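Why RETURNS_MOCKS here: with Mockito's RETURNS_MOCKS answer, nested calls on the IndicesService stub return further mocks instead of null, so FetchContext.prepare() can acquire a (mocked) searcher for each routed shard without hitting a NullPointerException. A minimal sketch of just that setup, using only calls that already appear in the test; the testGetIndexServiceForInvalidReaderId example further down gets by with a plain mock(IndicesService.class), presumably because it fails before any searcher is acquired.

IndicesService indicesService = mock(IndicesService.class, RETURNS_MOCKS);  // deep, non-null stubbing
SharedShardContexts sharedShardContexts = new SharedShardContexts(indicesService);  // same construction as in the test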
Use of io.crate.action.job.SharedShardContexts in project crate by crate.
In the class DocLevelCollectTest, the method collect:
private Bucket collect(RoutedCollectPhase collectNode) throws Throwable {
    ContextPreparer contextPreparer = internalCluster().getDataNodeInstance(ContextPreparer.class);
    JobContextService contextService = internalCluster().getDataNodeInstance(JobContextService.class);
    SharedShardContexts sharedShardContexts =
        new SharedShardContexts(internalCluster().getDataNodeInstance(IndicesService.class));
    JobExecutionContext.Builder builder = contextService.newBuilder(collectNode.jobId());
    NodeOperation nodeOperation =
        NodeOperation.withDownstream(collectNode, mock(ExecutionPhase.class), (byte) 0, "remoteNode");
    List<CompletableFuture<Bucket>> results =
        contextPreparer.prepareOnRemote(ImmutableList.of(nodeOperation), builder, sharedShardContexts);
    JobExecutionContext context = contextService.createContext(builder);
    context.start();
    // wait at most two seconds for the single direct-response bucket
    return results.get(0).get(2, TimeUnit.SECONDS);
}
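A hedged usage sketch for this helper: the caller hands in a RoutedCollectPhase and gets back the materialized rows, with the two-second timeout above bounding the wait for the direct response. The builder collectNodeForTable is hypothetical and only stands in for whatever the test uses to create the phase, and the sketch assumes Bucket is iterable over Row, as elsewhere in the Crate codebase.

// Hypothetical call site; collectNodeForTable(...) is not part of the snippet above.
RoutedCollectPhase collectNode = collectNodeForTable("t1");
Bucket bucket = collect(collectNode);
for (Row row : bucket) {
    System.out.println(row.get(0));  // each Row holds the collected doc-level values
}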
Use of io.crate.action.job.SharedShardContexts in project crate by crate.
In the class ShardCollectSource, the method createMultiShardScoreDocCollector:
private CrateCollector createMultiShardScoreDocCollector(RoutedCollectPhase collectPhase,
                                                         BatchConsumer consumer,
                                                         JobCollectContext jobCollectContext,
                                                         String localNodeId) {
    Map<String, Map<String, List<Integer>>> locations = collectPhase.routing().locations();
    SharedShardContexts sharedShardContexts = jobCollectContext.sharedShardContexts();
    Map<String, List<Integer>> indexShards = locations.get(localNodeId);
    List<OrderedDocCollector> orderedDocCollectors = new ArrayList<>();
    for (Map.Entry<String, List<Integer>> entry : indexShards.entrySet()) {
        String indexName = entry.getKey();
        for (Integer shardNum : entry.getValue()) {
            ShardId shardId = new ShardId(indexName, shardNum);
            SharedShardContext context = sharedShardContexts.getOrCreateContext(shardId);
            try {
                ShardCollectorProvider shardCollectorProvider = getCollectorProviderSafe(shardId);
                orderedDocCollectors.add(shardCollectorProvider.getOrderedCollector(
                    collectPhase, context, jobCollectContext, consumer.requiresScroll()));
            } catch (ShardNotFoundException | IllegalIndexShardStateException e) {
                throw e;
            } catch (IndexNotFoundException e) {
                // a partition index that no longer exists is simply skipped
                if (PartitionName.isPartition(indexName)) {
                    break;
                }
                throw e;
            } catch (Throwable t) {
                throw new UnhandledServerException(t);
            }
        }
    }
    OrderBy orderBy = collectPhase.orderBy();
    assert orderBy != null : "orderBy must not be null";
    return BatchIteratorCollectorBridge.newInstance(
        OrderedLuceneBatchIteratorFactory.newInstance(
            orderedDocCollectors,
            collectPhase.toCollect().size(),
            OrderingByPosition.rowOrdering(
                OrderByPositionVisitor.orderByPositions(orderBy.orderBySymbols(), collectPhase.toCollect()),
                orderBy.reverseFlags(),
                orderBy.nullsFirst()),
            executor,
            consumer.requiresScroll()),
        consumer);
}
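For orientation, the routing structure this method walks (node id, then index name, then shard ids) has the same shape as the map built with TreeMapBuilder in the FetchContextTest example above. A minimal sketch with hypothetical node and index names:

// node "n1" holds shards 0 and 1 of index "i1" (hypothetical values)
Map<String, Map<String, List<Integer>>> locations =
    TreeMapBuilder.<String, Map<String, List<Integer>>>newMapBuilder()
        .put("n1", TreeMapBuilder.<String, List<Integer>>newMapBuilder()
            .put("i1", ImmutableList.of(0, 1)).map())
        .map();
// locations.get(localNodeId) then yields the indexShards map iterated above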
Use of io.crate.action.job.SharedShardContexts in project crate by crate.
In the class ExecutionPhasesTask, the method setupContext:
private void setupContext(Map<String, Collection<NodeOperation>> operationByServer,
                          List<ExecutionPhase> handlerPhases,
                          List<BatchConsumer> handlerConsumers) throws Throwable {
    assert handlerPhases.size() == handlerConsumers.size() : "handlerPhases size must match handlerConsumers size";
    String localNodeId = clusterService.localNode().getId();
    Collection<NodeOperation> localNodeOperations = operationByServer.remove(localNodeId);
    if (localNodeOperations == null) {
        localNodeOperations = Collections.emptyList();
    }
    // + 1 for localJobContext which is always created
    InitializationTracker initializationTracker = new InitializationTracker(operationByServer.size() + 1);
    List<Tuple<ExecutionPhase, BatchConsumer>> handlerPhaseAndReceiver =
        createHandlerPhaseAndReceivers(handlerPhases, handlerConsumers, initializationTracker);
    JobExecutionContext.Builder builder =
        jobContextService.newBuilder(jobId(), localNodeId, operationByServer.keySet());
    List<CompletableFuture<Bucket>> directResponseFutures = contextPreparer.prepareOnHandler(
        localNodeOperations, builder, handlerPhaseAndReceiver, new SharedShardContexts(indicesService));
    JobExecutionContext localJobContext = jobContextService.createContext(builder);
    List<PageBucketReceiver> pageBucketReceivers =
        getHandlerBucketReceivers(localJobContext, handlerPhaseAndReceiver);
    int bucketIdx = 0;
    /*
     * If you touch anything here make sure the following tests pass with > 1k iterations:
     *
     * Seed: 112E1807417E925A - testInvalidPatternSyntax
     * Seed: Any - testRegularSelectWithFewAvailableThreadsShouldNeverGetStuck
     * Seed: CC456FF5004F35D3 - testFailureOfJoinDownstream
     */
    if (!localNodeOperations.isEmpty() && !directResponseFutures.isEmpty()) {
        CompletableFutures.allAsList(directResponseFutures)
            .whenComplete(new SetBucketCallback(pageBucketReceivers, bucketIdx, initializationTracker));
        bucketIdx++;
        try {
            // initializationTracker for localNodeOperations is triggered via SetBucketCallback
            localJobContext.start();
        } catch (Throwable t) {
            accountFailureForRemoteOperations(operationByServer, initializationTracker, handlerPhaseAndReceiver, t);
            return;
        }
    } else {
        try {
            localJobContext.start();
            initializationTracker.jobInitialized();
        } catch (Throwable t) {
            initializationTracker.jobInitializationFailed(t);
            accountFailureForRemoteOperations(operationByServer, initializationTracker, handlerPhaseAndReceiver, t);
            return;
        }
    }
    sendJobRequests(localNodeId, operationByServer, pageBucketReceivers, handlerPhaseAndReceiver,
        bucketIdx, initializationTracker);
}
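The "+ 1" in the tracker size above is plain counting: after remove(localNodeId), operationByServer holds only remote servers, and the always-created local job context contributes one more slot. A small illustrative sketch with hypothetical numbers:

int remoteServers = 3;  // e.g. operationByServer.size() after the local entry was removed
InitializationTracker tracker = new InitializationTracker(remoteServers + 1);  // 4 slots: 3 remote + 1 local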
Use of io.crate.action.job.SharedShardContexts in project crate by crate.
In the class FetchContextTest, the method testGetIndexServiceForInvalidReaderId:
@Test
public void testGetIndexServiceForInvalidReaderId() throws Exception {
    final FetchContext context = new FetchContext(
        new FetchPhase(1, null, new TreeMap<String, Integer>(),
            HashMultimap.<TableIdent, String>create(), ImmutableList.<Reference>of()),
        "dummy",
        new SharedShardContexts(mock(IndicesService.class)),
        Collections.<Routing>emptyList());
    expectedException.expect(IllegalArgumentException.class);
    context.indexService(10);
}
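The expectedException field used here is presumably the standard JUnit 4 ExpectedException rule; its usual declaration (not shown in the snippet) looks like this:

@Rule
public ExpectedException expectedException = ExpectedException.none();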