Use of io.trino.sql.planner.LocalExecutionPlanner in project trino by trinodb.
From the class TaskTestUtils, method createTestingPlanner:
public static LocalExecutionPlanner createTestingPlanner()
{
    PageSourceManager pageSourceManager = new PageSourceManager();
    pageSourceManager.addConnectorPageSourceProvider(CONNECTOR_ID, new TestingPageSourceProvider());

    // we don't start the finalizer so nothing will be collected, which is ok for a test
    FinalizerService finalizerService = new FinalizerService();

    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(PLANNER_CONTEXT.getTypeOperators());
    NodeScheduler nodeScheduler = new NodeScheduler(new UniformNodeSelectorFactory(
            new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true),
            new NodeTaskMap(finalizerService)));
    NodePartitioningManager nodePartitioningManager = new NodePartitioningManager(nodeScheduler, blockTypeOperators);
    PageFunctionCompiler pageFunctionCompiler = new PageFunctionCompiler(PLANNER_CONTEXT.getFunctionManager(), 0);
    return new LocalExecutionPlanner(
            PLANNER_CONTEXT,
            createTestingTypeAnalyzer(PLANNER_CONTEXT),
            Optional.empty(),
            pageSourceManager,
            new IndexManager(),
            nodePartitioningManager,
            new PageSinkManager(),
            new MockDirectExchangeClientSupplier(),
            new ExpressionCompiler(PLANNER_CONTEXT.getFunctionManager(), pageFunctionCompiler),
            pageFunctionCompiler,
            new JoinFilterFunctionCompiler(PLANNER_CONTEXT.getFunctionManager()),
            new IndexJoinLookupStats(),
            new TaskManagerConfig(),
            // the spiller factories are stubs: any attempt to spill fails the test
            new GenericSpillerFactory((types, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            }),
            (types, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            },
            (types, partitionFunction, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            },
            new PagesIndex.TestingFactory(false),
            new JoinCompiler(PLANNER_CONTEXT.getTypeOperators()),
            new TrinoOperatorFactories(),
            new OrderingCompiler(PLANNER_CONTEXT.getTypeOperators()),
            new DynamicFilterConfig(),
            blockTypeOperators,
            new TableExecuteContextManager(),
            new ExchangeManagerRegistry(new ExchangeHandleResolver()));
}
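A hedged usage sketch of the testing planner: it is usually handed to a SqlTaskExecutionFactory (as the next snippet does), or used directly to plan a fragment. Here taskContext, fragment, types, and outputFactory are hypothetical stand-ins supplied by the surrounding test harness; the argument order mirrors the plan(...) call in LocalQueryRunner.createDrivers below.

LocalExecutionPlanner planner = TaskTestUtils.createTestingPlanner();
// taskContext, fragment, types, and outputFactory are assumed to exist in the test
LocalExecutionPlan localPlan = planner.plan(
        taskContext,
        fragment.getStageExecutionDescriptor(),
        fragment.getRoot(),
        fragment.getPartitioningScheme().getOutputLayout(),
        types,
        fragment.getPartitionedSources(),
        outputFactory);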
Use of io.trino.sql.planner.LocalExecutionPlanner in project trino by trinodb.
From the class TestMemoryRevokingScheduler, method setUp:
@BeforeMethod
public void setUp()
{
    memoryPool = new MemoryPool(DataSize.ofBytes(10));

    TaskExecutor taskExecutor = new TaskExecutor(8, 16, 3, 4, Ticker.systemTicker());
    taskExecutor.start();

    // Must be single threaded
    executor = newScheduledThreadPool(1, threadsNamed("task-notification-%s"));
    scheduledExecutor = newScheduledThreadPool(2, threadsNamed("task-notification-%s"));

    LocalExecutionPlanner planner = createTestingPlanner();
    sqlTaskExecutionFactory = new SqlTaskExecutionFactory(executor, taskExecutor, planner, createTestSplitMonitor(), new TaskManagerConfig());
    allOperatorContexts = null;
}
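setUp starts a TaskExecutor and two scheduled executors, so the test needs a matching tear-down to avoid leaking threads. A minimal sketch, assuming taskExecutor is promoted to a field (the snippet above assigns it to a local variable); the annotation follows the test's TestNG style, but the exact body here is an assumption, not the project's code:

@AfterMethod(alwaysRun = true)
public void tearDown()
{
    taskExecutor.stop(); // stops the time-sharing executor started in setUp
    executor.shutdownNow();
    scheduledExecutor.shutdownNow();
    memoryPool = null;
}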
Use of io.trino.sql.planner.LocalExecutionPlanner in project trino by trinodb.
From the class LocalQueryRunner, method createDrivers:
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext)
{
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(
                plan.getRoot(),
                plan.getTypes(),
                plannerContext.getMetadata(),
                plannerContext.getFunctionManager(),
                plan.getStatsAndCosts(),
                session,
                0,
                false));
    }

    SubPlan subplan = createSubPlans(session, plan, true);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }

    TableExecuteContextManager tableExecuteContextManager = new TableExecuteContextManager();
    tableExecuteContextManager.registerTableExecuteContextForQuery(taskContext.getQueryContext().getQueryId());

    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            plannerContext,
            new TypeAnalyzer(plannerContext, statementAnalyzerFactory),
            Optional.empty(),
            pageSourceManager,
            indexManager,
            nodePartitioningManager,
            pageSinkManager,
            null, // DirectExchangeClientSupplier is not needed for local execution
            expressionCompiler,
            pageFunctionCompiler,
            joinFilterFunctionCompiler,
            new IndexJoinLookupStats(),
            this.taskManagerConfig,
            spillerFactory,
            singleStreamSpillerFactory,
            partitioningSpillerFactory,
            new PagesIndex.TestingFactory(false),
            joinCompiler,
            operatorFactories,
            new OrderingCompiler(plannerContext.getTypeOperators()),
            new DynamicFilterConfig(),
            blockTypeOperators,
            tableExecuteContextManager,
            exchangeManagerRegistry);

    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext,
            stageExecutionDescriptor,
            subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(),
            plan.getTypes(),
            subplan.getFragment().getPartitionedSources(),
            outputFactory);
    // generate splitAssignments
    List<SplitAssignment> splitAssignments = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(
                session,
                table,
                stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING,
                EMPTY,
                alwaysTrue());

        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }

        splitAssignments.add(new SplitAssignment(tableScan.getId(), scheduledSplits.build(), true));
    }
    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext
                        .addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false)
                        .addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }
    // add split assignments to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (SplitAssignment splitAssignment : splitAssignments) {
        DriverFactory driverFactory = driverFactoriesBySource.get(splitAssignment.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : splitAssignment.getSplits()) {
            DriverContext driverContext = taskContext
                    .addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned)
                    .addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSplitAssignment(new SplitAssignment(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }

    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
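createDrivers only builds the drivers; something still has to run them. A sketch of the drain loop, modeled on the way LocalQueryRunner itself processes drivers until every pipeline finishes (the exact Driver API varies across Trino versions):

boolean done = false;
while (!done) {
    boolean processed = false;
    for (Driver driver : drivers) {
        if (!driver.isFinished()) {
            driver.process(); // advance this pipeline by one quantum of work
            processed = true;
        }
    }
    done = !processed;
}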