Use of io.prestosql.operator.exchange.LocalExchangeSourceOperator.LocalExchangeSourceOperatorFactory in project hetu-core by openlookeng.

From the class TestHashJoinOperator, method setupBuildSide: it pushes the build-side pages through a hash-partitioned local exchange and returns the operator factories used to construct the join's lookup source.
private BuildSideSetup setupBuildSide(boolean parallelBuild, TaskContext taskContext, List<Integer> hashChannels, RowPagesBuilder buildPages,
        Optional<InternalJoinFilterFunction> filterFunction, boolean spillEnabled, SingleStreamSpillerFactory singleStreamSpillerFactory, boolean outer)
{
    Optional<JoinFilterFunctionFactory> filterFunctionFactory = filterFunction.map(function ->
            (session, addresses, pages) -> new StandardJoinFilterFunction(function, addresses, pages));

    // hash-partitioned local exchange that redistributes the build pages across partitions
    int partitionCount = parallelBuild ? PARTITION_COUNT : 1;
    LocalExchangeFactory localExchangeFactory = new LocalExchangeFactory(FIXED_HASH_DISTRIBUTION, partitionCount, buildPages.getTypes(),
            hashChannels, buildPages.getHashChannel(), UNGROUPED_EXECUTION, new DataSize(32, DataSize.Unit.MEGABYTE));
    LocalExchangeSinkFactoryId localExchangeSinkFactoryId = localExchangeFactory.newSinkFactoryId();
    localExchangeFactory.noMoreSinkFactories();

    // collect input data into the partitioned exchange: a ValuesOperator feeds the exchange sink until all pages are consumed
    DriverContext collectDriverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    ValuesOperatorFactory valuesOperatorFactory = new ValuesOperatorFactory(0, new PlanNodeId("values"), buildPages.build());
    LocalExchangeSinkOperatorFactory sinkOperatorFactory = new LocalExchangeSinkOperatorFactory(localExchangeFactory, 1, new PlanNodeId("sink"), localExchangeSinkFactoryId, Function.identity());
    Driver sourceDriver = Driver.createDriver(collectDriverContext,
            valuesOperatorFactory.createOperator(collectDriverContext),
            sinkOperatorFactory.createOperator(collectDriverContext));
    valuesOperatorFactory.noMoreOperators();
    sinkOperatorFactory.noMoreOperators();

    while (!sourceDriver.isFinished()) {
        sourceDriver.process();
    }

    // build side operator factories: exchange source -> hash builder feeding the partitioned lookup source
    LocalExchangeSourceOperatorFactory sourceOperatorFactory = new LocalExchangeSourceOperatorFactory(0, new PlanNodeId("source"), localExchangeFactory, 1);
    PartitionedLookupSourceFactory lookupSourceFactory = new PartitionedLookupSourceFactory(buildPages.getTypes(),
            rangeList(buildPages.getTypes().size()).stream().map(buildPages.getTypes()::get).collect(toImmutableList()),
            hashChannels.stream().map(buildPages.getTypes()::get).collect(toImmutableList()),
            partitionCount, requireNonNull(ImmutableMap.of(), "layout is null"), outer, false);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactoryManager = new JoinBridgeManager<>(outer, UNGROUPED_EXECUTION, UNGROUPED_EXECUTION,
            ignored -> lookupSourceFactory, lookupSourceFactory.getOutputTypes());
    HashBuilderOperatorFactory buildOperatorFactory = new HashBuilderOperatorFactory(1, new PlanNodeId("build"), lookupSourceFactoryManager,
            rangeList(buildPages.getTypes().size()), hashChannels, buildPages.getHashChannel().map(OptionalInt::of).orElse(OptionalInt.empty()),
            filterFunctionFactory, Optional.empty(), ImmutableList.of(), 100, new PagesIndex.TestingFactory(false), spillEnabled, singleStreamSpillerFactory);
    return new BuildSideSetup(lookupSourceFactoryManager, buildOperatorFactory, sourceOperatorFactory, partitionCount);
}
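The method calls a private rangeList helper that is not shown on this page. Below is a minimal sketch of what it presumably does, assuming it simply enumerates every build-side channel index; the name comes from the snippet, but the body and signature are an assumption based on comparable Presto-style test code.

import static com.google.common.collect.ImmutableList.toImmutableList;

import java.util.List;
import java.util.stream.IntStream;

final class RangeListSketch
{
    // Assumed behavior: return the channel indexes [0, rangeEnd), i.e. every build-side column.
    static List<Integer> rangeList(int rangeEnd)
    {
        return IntStream.range(0, rangeEnd)
                .boxed()
                .collect(toImmutableList());
    }
}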
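For context, a hypothetical caller of setupBuildSide might look like the sketch below. None of it comes from this page: the rowPagesBuilder, TestingTaskContext.createTaskContext, TEST_SESSION, and unsupportedSingleStreamSpillerFactory() helpers, the executor fields, and all parameter values are assumptions about how such Presto-style operator tests are usually wired inside the test class.

// Hypothetical caller sketch; helper names and parameter values are assumptions, not taken from this page.
private BuildSideSetup exampleBuildSide()
{
    TaskContext taskContext = TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION);
    RowPagesBuilder buildPages = rowPagesBuilder(VARCHAR, BIGINT)
            .addSequencePage(100, 20, 30);
    return setupBuildSide(
            true,                 // parallelBuild: use PARTITION_COUNT exchange partitions
            taskContext,
            Ints.asList(0),       // hash (join) channel: the first build column
            buildPages,
            Optional.empty(),     // no InternalJoinFilterFunction
            false,                // spill disabled
            SingleStreamSpillerFactory.unsupportedSingleStreamSpillerFactory(),
            false);               // inner join, not outer
}

The returned BuildSideSetup bundles the lookup source factory manager, the HashBuilderOperatorFactory, the LocalExchangeSourceOperatorFactory, and the partition count, so the test can instantiate the build drivers and then run the probe side against the resulting lookup source.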