Use of io.trino.operator.DriverContext in project trino by trinodb.
From class TestMemoryPools, method setupConsumeRevocableMemory.
private RevocableMemoryOperator setupConsumeRevocableMemory(DataSize reservedPerPage, long numberOfPages)
{
    AtomicReference<RevocableMemoryOperator> createOperator = new AtomicReference<>();
    setUp(() -> {
        DriverContext driverContext = taskContext.addPipelineContext(0, false, false, false).addDriverContext();
        OperatorContext revokableOperatorContext = driverContext.addOperatorContext(
                Integer.MAX_VALUE,
                new PlanNodeId("revokable_operator"),
                TableScanOperator.class.getSimpleName());

        // output consumer that simply discards pages
        OutputFactory outputFactory = new PageConsumerOutputFactory(types -> (page -> {}));
        Operator outputOperator = outputFactory.createOutputOperator(2, new PlanNodeId("output"), ImmutableList.of(), Function.identity(), new TestingPagesSerdeFactory()).createOperator(driverContext);

        // the operator under test reserves revocable memory per page
        RevocableMemoryOperator revocableMemoryOperator = new RevocableMemoryOperator(revokableOperatorContext, reservedPerPage, numberOfPages);
        createOperator.set(revocableMemoryOperator);
        Driver driver = Driver.createDriver(driverContext, revocableMemoryOperator, outputOperator);
        return ImmutableList.of(driver);
    });
    return createOperator.get();
}
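The TaskContext-to-DriverContext-to-OperatorContext chain above is the common thread in all of these examples. A minimal sketch of just that chain, assuming a TaskContext named taskContext is in scope as in the test above; the pipeline id, operator id, plan node id, and operator type name are illustrative values, not taken from any particular test:

// obtain a DriverContext for pipeline 0 (neither input nor output driver, not partitioned)
DriverContext driverContext = taskContext
        .addPipelineContext(0, false, false, false)
        .addDriverContext();
// register an operator on that driver; the id and type name are arbitrary here
OperatorContext operatorContext = driverContext.addOperatorContext(
        1,
        new PlanNodeId("example"),
        "ExampleOperator");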
Use of io.trino.operator.DriverContext in project trino by trinodb.
From class LocalQueryRunner, method createDrivers.
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext)
{
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(plan.getRoot(), plan.getTypes(), plannerContext.getMetadata(), plannerContext.getFunctionManager(), plan.getStatsAndCosts(), session, 0, false));
    }

    SubPlan subplan = createSubPlans(session, plan, true);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }

    TableExecuteContextManager tableExecuteContextManager = new TableExecuteContextManager();
    tableExecuteContextManager.registerTableExecuteContextForQuery(taskContext.getQueryContext().getQueryId());

    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            plannerContext,
            new TypeAnalyzer(plannerContext, statementAnalyzerFactory),
            Optional.empty(),
            pageSourceManager,
            indexManager,
            nodePartitioningManager,
            pageSinkManager,
            null,
            expressionCompiler,
            pageFunctionCompiler,
            joinFilterFunctionCompiler,
            new IndexJoinLookupStats(),
            this.taskManagerConfig,
            spillerFactory,
            singleStreamSpillerFactory,
            partitioningSpillerFactory,
            new PagesIndex.TestingFactory(false),
            joinCompiler,
            operatorFactories,
            new OrderingCompiler(plannerContext.getTypeOperators()),
            new DynamicFilterConfig(),
            blockTypeOperators,
            tableExecuteContextManager,
            exchangeManagerRegistry);

    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext,
            stageExecutionDescriptor,
            subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(),
            plan.getTypes(),
            subplan.getFragment().getPartitionedSources(),
            outputFactory);

    // generate splitAssignments
    List<SplitAssignment> splitAssignments = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(session, table, stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING, EMPTY, alwaysTrue());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        splitAssignments.add(new SplitAssignment(tableScan.getId(), scheduledSplits.build(), true));
    }

    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false).addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }

    // add split assignments to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (SplitAssignment splitAssignment : splitAssignments) {
        DriverFactory driverFactory = driverFactoriesBySource.get(splitAssignment.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : splitAssignment.getSplits()) {
            DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned).addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSplitAssignment(new SplitAssignment(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }

    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
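Note that createDrivers only builds the drivers; the caller is expected to run them. A minimal sketch of a single-threaded harness loop, using the same no-argument Driver.process() call that appears in setupBuildSide further below (how LocalQueryRunner actually schedules these drivers is not shown here):

List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
for (Driver driver : drivers) {
    // process each driver to completion, one at a time
    while (!driver.isFinished()) {
        driver.process();
    }
}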
Use of io.trino.operator.DriverContext in project trino by trinodb.
From class TestHashJoinOperator, method testUnwrapsLazyBlocks.
@Test
public void testUnwrapsLazyBlocks()
{
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction((leftPosition, leftPage, rightPosition, rightPage) -> {
        // force loading of probe block
        rightPage.getBlock(1).getLoadedBlock();
        return true;
    });

    RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)).addSequencePage(1, 0);
    BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), false, SINGLE_STREAM_SPILLER_FACTORY);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager();

    RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT, BIGINT));
    List<Page> probeInput = probePages.addSequencePage(1, 0, 0).build();
    // wrap the second probe channel in a LazyBlock so the join has to load it
    probeInput = probeInput.stream()
            .map(page -> new Page(page.getBlock(0), new LazyBlock(1, () -> page.getBlock(1))))
            .collect(toImmutableList());

    OperatorFactory joinOperatorFactory = operatorFactories.innerJoin(0, new PlanNodeId("test"), lookupSourceFactory, false, false, true, probePages.getTypes(), Ints.asList(0), getHashChannelAsInt(probePages), Optional.empty(), OptionalInt.of(1), PARTITIONING_SPILLER_FACTORY, TYPE_OPERATOR_FACTORY);

    instantiateBuildDrivers(buildSideSetup, taskContext);
    buildLookupSource(executor, buildSideSetup);

    Operator operator = joinOperatorFactory.createOperator(driverContext);
    assertTrue(operator.needsInput());
    operator.addInput(probeInput.get(0));
    operator.finish();

    Page output = operator.getOutput();
    assertFalse(output.getBlock(1) instanceof LazyBlock);
}
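The essential move in this test is wrapping one probe channel in a LazyBlock so that only the join operator can trigger its loading. A small hypothetical helper extracting just that step (the helper name is ours, not the test's; it uses only the Page and LazyBlock calls already shown above):

private static Page withLazySecondChannel(Page page)
{
    // channel 0 stays eager; channel 1 is materialized only when loaded
    return new Page(
            page.getBlock(0),
            new LazyBlock(page.getPositionCount(), () -> page.getBlock(1)));
}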
Use of io.trino.operator.DriverContext in project trino by trinodb.
From class TestHashJoinOperator, method testYield.
@Test
public void testYield()
{
    // create a filter function that forces a yield on every probe match,
    // and verify that we yield once per match (#match times in total)
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();

    // force a yield for every match
    AtomicInteger filterFunctionCalls = new AtomicInteger();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction((leftPosition, leftPage, rightPosition, rightPage) -> {
        filterFunctionCalls.incrementAndGet();
        driverContext.getYieldSignal().forceYieldForTesting();
        return true;
    });

    // build with 40 entries
    int entries = 40;
    RowPagesBuilder buildPages = rowPagesBuilder(true, Ints.asList(0), ImmutableList.of(BIGINT)).addSequencePage(entries, 42);
    BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), false, SINGLE_STREAM_SPILLER_FACTORY);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager();

    // probe matching the above 40 entries
    RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT));
    List<Page> probeInput = probePages.addSequencePage(100, 0).build();
    OperatorFactory joinOperatorFactory = operatorFactories.innerJoin(0, new PlanNodeId("test"), lookupSourceFactory, false, false, true, probePages.getTypes(), Ints.asList(0), getHashChannelAsInt(probePages), Optional.empty(), OptionalInt.of(1), PARTITIONING_SPILLER_FACTORY, TYPE_OPERATOR_FACTORY);

    instantiateBuildDrivers(buildSideSetup, taskContext);
    buildLookupSource(executor, buildSideSetup);

    Operator operator = joinOperatorFactory.createOperator(driverContext);
    assertTrue(operator.needsInput());
    operator.addInput(probeInput.get(0));
    operator.finish();

    // we will yield 40 times due to filterFunction
    for (int i = 0; i < entries; i++) {
        driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
        filterFunctionCalls.set(0);
        assertNull(operator.getOutput());
        assertEquals(filterFunctionCalls.get(), 1, "Expected join to stop processing (yield) after calling filter function once");
        driverContext.getYieldSignal().reset();
    }

    // the delayed yield will not prevent the operator from producing a page now
    // (a yield won't be forced because the filter function won't be called anymore)
    driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
    // expect an output page to be produced within a few calls to getOutput(), e.g. to facilitate spill
    Page output = null;
    for (int i = 0; output == null && i < 5; i++) {
        output = operator.getOutput();
    }
    assertNotNull(output);
    driverContext.getYieldSignal().reset();

    // make sure we have all 40 entries
    assertEquals(output.getPositionCount(), entries);
}
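The yield choreography in the loop above follows a fixed three-step pattern: arm a delayed yield on the driver's yield signal, invoke the operator, then reset the signal. Condensed into a sketch using only calls from the test (the one-second delay is an illustrative value):

// arm a delayed yield so a forced yield can be observed deterministically
driverContext.getYieldSignal().setWithDelay(SECONDS.toNanos(1), driverContext.getYieldExecutor());
// the operator returns null here if it yielded mid-page
Page page = operator.getOutput();
// clear the signal before the next iteration
driverContext.getYieldSignal().reset();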
Use of io.trino.operator.DriverContext in project trino by trinodb.
From class JoinTestUtils, method setupBuildSide.
public static BuildSideSetup setupBuildSide(NodePartitioningManager nodePartitioningManager, boolean parallelBuild, TaskContext taskContext, RowPagesBuilder buildPages, Optional<InternalJoinFilterFunction> filterFunction, boolean spillEnabled, SingleStreamSpillerFactory singleStreamSpillerFactory)
{
    Optional<JoinFilterFunctionCompiler.JoinFilterFunctionFactory> filterFunctionFactory = filterFunction.map(function -> (session, addresses, pages) -> new StandardJoinFilterFunction(function, addresses, pages));
    int partitionCount = parallelBuild ? PARTITION_COUNT : 1;
    List<Integer> hashChannels = buildPages.getHashChannels().orElseThrow();
    LocalExchange.LocalExchangeFactory localExchangeFactory = new LocalExchange.LocalExchangeFactory(nodePartitioningManager, taskContext.getSession(), FIXED_HASH_DISTRIBUTION, partitionCount, buildPages.getTypes(), hashChannels, buildPages.getHashChannel(), UNGROUPED_EXECUTION, DataSize.of(32, DataSize.Unit.MEGABYTE), TYPE_OPERATOR_FACTORY);
    LocalExchange.LocalExchangeSinkFactoryId localExchangeSinkFactoryId = localExchangeFactory.newSinkFactoryId();
    localExchangeFactory.noMoreSinkFactories();

    // collect input data into the partitioned exchange
    DriverContext collectDriverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    ValuesOperator.ValuesOperatorFactory valuesOperatorFactory = new ValuesOperator.ValuesOperatorFactory(0, new PlanNodeId("values"), buildPages.build());
    LocalExchangeSinkOperator.LocalExchangeSinkOperatorFactory sinkOperatorFactory = new LocalExchangeSinkOperator.LocalExchangeSinkOperatorFactory(localExchangeFactory, 1, new PlanNodeId("sink"), localExchangeSinkFactoryId, Function.identity());
    Driver sourceDriver = Driver.createDriver(collectDriverContext, valuesOperatorFactory.createOperator(collectDriverContext), sinkOperatorFactory.createOperator(collectDriverContext));
    valuesOperatorFactory.noMoreOperators();
    sinkOperatorFactory.noMoreOperators();
    while (!sourceDriver.isFinished()) {
        sourceDriver.process();
    }

    // build side operator factories
    LocalExchangeSourceOperatorFactory sourceOperatorFactory = new LocalExchangeSourceOperatorFactory(0, new PlanNodeId("source"), localExchangeFactory);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactoryManager = JoinBridgeManager.lookupAllAtOnce(new PartitionedLookupSourceFactory(
            buildPages.getTypes(),
            rangeList(buildPages.getTypes().size()).stream().map(buildPages.getTypes()::get).collect(toImmutableList()),
            hashChannels.stream().map(buildPages.getTypes()::get).collect(toImmutableList()),
            partitionCount,
            false,
            TYPE_OPERATOR_FACTORY));
    HashBuilderOperatorFactory buildOperatorFactory = new HashBuilderOperatorFactory(
            1,
            new PlanNodeId("build"),
            lookupSourceFactoryManager,
            rangeList(buildPages.getTypes().size()),
            hashChannels,
            buildPages.getHashChannel().map(OptionalInt::of).orElse(OptionalInt.empty()),
            filterFunctionFactory,
            Optional.empty(),
            ImmutableList.of(),
            100,
            new PagesIndex.TestingFactory(false),
            spillEnabled,
            singleStreamSpillerFactory,
            incrementalLoadFactorHashArraySizeSupplier(taskContext.getSession()));
    return new BuildSideSetup(lookupSourceFactoryManager, buildOperatorFactory, sourceOperatorFactory, partitionCount);
}
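Callers pair setupBuildSide with the instantiation helpers used in the hash-join tests above: first set up the build side, then start its drivers, then build the lookup source before creating the probe-side join operator. A minimal sketch, with the arguments taken from testYield and the filter function omitted:

BuildSideSetup buildSideSetup = setupBuildSide(
        nodePartitioningManager,
        true,
        taskContext,
        buildPages,
        Optional.empty(),
        false,
        SINGLE_STREAM_SPILLER_FACTORY);
instantiateBuildDrivers(buildSideSetup, taskContext);
buildLookupSource(executor, buildSideSetup);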