Usage of io.crate.data.BatchConsumer in project crate — class MultiConsumerTest, method testFirstAcceptNullIteratorDoesNotCauseNPE:
@Test
public void testFirstAcceptNullIteratorDoesNotCauseNPE() throws Exception {
    // Terminal consumer that records the final result (or failure) of the composite.
    TestingBatchConsumer collectingConsumer = new TestingBatchConsumer();
    BatchConsumer multiConsumer =
        new CompositeCollector.MultiConsumer(2, collectingConsumer, CompositeBatchIterator::new);

    // First part arrives as a failure with a null iterator; this must not trigger a NPE.
    multiConsumer.accept(null, new IllegalStateException("dummy"));
    // Second part completes normally with an empty iterator.
    multiConsumer.accept(RowsBatchIterator.empty(), null);

    // Once both parts arrived, the recorded failure must surface from getResult().
    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage("dummy");
    collectingConsumer.getResult();
}
Usage of io.crate.data.BatchConsumer in project crate — class ShardCollectorProvider, method getCollectorBuilder:
/**
 * Create a CrateCollector.Builder to collect rows from a shard.
 * <p>
 * This also creates all shard-level projectors.
 * The RowReceiver that is used for {@link CrateCollector.Builder#build(BatchConsumer)}
 * should be the first node-level projector.
 */
public CrateCollector.Builder getCollectorBuilder(RoutedCollectPhase collectPhase,
                                                  boolean requiresScroll,
                                                  JobCollectContext jobCollectContext) throws Exception {
    assert collectPhase.orderBy() == null : "getDocCollector shouldn't be called if there is an orderBy on the collectPhase";
    RoutedCollectPhase normalizedCollectNode = collectPhase.normalize(shardNormalizer, null);

    final CrateCollector.Builder builder;
    if (normalizedCollectNode.whereClause().noMatch()) {
        // The WHERE clause can never match; skip touching the shard entirely.
        builder = RowsCollector.emptyBuilder();
    } else {
        assert normalizedCollectNode.maxRowGranularity() == RowGranularity.DOC : "granularity must be DOC";
        builder = getBuilder(normalizedCollectNode, requiresScroll, jobCollectContext);
    }

    Collection<? extends Projection> shardProjections = Projections.shardProjections(collectPhase.projections());
    if (shardProjections.isEmpty()) {
        // No shard-level projections: the plain builder is sufficient.
        return builder;
    }
    // Wrap the builder so that consumers are routed through the shard-level
    // projector chain before reaching the underlying collector.
    return new CrateCollector.Builder() {
        @Override
        public CrateCollector build(BatchConsumer batchConsumer) {
            return builder.build(batchConsumer);
        }

        @Override
        public BatchConsumer applyProjections(BatchConsumer consumer) {
            return ProjectingBatchConsumer.create(
                consumer,
                shardProjections,
                normalizedCollectNode.jobId(),
                jobCollectContext.queryPhaseRamAccountingContext(),
                projectorFactory);
        }
    };
}
Usage of io.crate.data.BatchConsumer in project crate — class DistributingDownstreamFactoryTest, method testCreateDownstreamOneNode:
@Test
public void testCreateDownstreamOneNode() throws Exception {
    // Even with a single downstream node the factory must produce a
    // DistributingConsumer that broadcasts its buckets.
    BatchConsumer consumer = createDownstream(ImmutableSet.of("downstream_node"));
    assertThat(consumer, instanceOf(DistributingConsumer.class));
    DistributingConsumer distributingConsumer = (DistributingConsumer) consumer;
    assertThat(distributingConsumer.multiBucketBuilder, instanceOf(BroadcastingBucketBuilder.class));
}
Usage of io.crate.data.BatchConsumer in project crate — class ExecutionPhasesTask, method execute:
@Override
public void execute(BatchConsumer consumer, Row parameters) {
    assert nodeOperationTrees.size() == 1 : "must only have 1 NodeOperationTree for non-bulk operations";
    NodeOperationTree operationTree = nodeOperationTrees.get(0);
    Map<String, Collection<NodeOperation>> operationsByServer =
        NodeOperationGrouper.groupByServer(operationTree.nodeOperations());
    try {
        // Single handler phase / single consumer for the non-bulk case.
        setupContext(
            operationsByServer,
            Collections.singletonList(operationTree.leaf()),
            Collections.singletonList(consumer));
    } catch (Throwable t) {
        // Report setup failures through the consumer instead of throwing,
        // so the caller observes the error via the normal result path.
        consumer.accept(null, t);
    }
}
Usage of io.crate.data.BatchConsumer in project crate — class ExecutionPhasesTask, method executeBulk:
@Override
public List<CompletableFuture<Long>> executeBulk() {
    // Flatten the node operations of every operation tree and group them by server.
    // Method reference replaces the verbose anonymous Guava Function — the file
    // already uses Java 8 lambdas/method references elsewhere.
    FluentIterable<NodeOperation> nodeOperations =
        FluentIterable.from(nodeOperationTrees).transformAndConcat(NodeOperationTree::nodeOperations);
    Map<String, Collection<NodeOperation>> operationByServer = NodeOperationGrouper.groupByServer(nodeOperations);

    List<ExecutionPhase> handlerPhases = new ArrayList<>(nodeOperationTrees.size());
    List<BatchConsumer> handlerConsumers = new ArrayList<>(nodeOperationTrees.size());
    List<CompletableFuture<Long>> results = new ArrayList<>(nodeOperationTrees.size());
    for (NodeOperationTree nodeOperationTree : nodeOperationTrees) {
        // Each bulk item sums the first column (row count) of its result rows.
        // summingLong already yields a Long, so the previous
        // collectingAndThen(..., sum -> sum) identity wrapper was redundant.
        CollectingBatchConsumer<?, Long> consumer =
            new CollectingBatchConsumer<>(Collectors.summingLong(r -> ((long) r.get(0))));
        handlerConsumers.add(consumer);
        results.add(consumer.resultFuture());
        handlerPhases.add(nodeOperationTree.leaf());
    }
    try {
        setupContext(operationByServer, handlerPhases, handlerConsumers);
    } catch (Throwable throwable) {
        // A setup failure fails the whole bulk request with a single failed future.
        return Collections.singletonList(CompletableFutures.failedFuture(throwable));
    }
    return results;
}
Aggregations