Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
From the class TestMemoryRevokingScheduler, method createContexts:
private OperatorContext createContexts(SqlTask sqlTask) {
    // Build the full context hierarchy: QueryContext -> TaskContext -> PipelineContext -> DriverContext -> OperatorContext
    TaskContext taskContext = sqlTask.getQueryContext().addTaskContext(
            new TaskStateMachine(new TaskId("q", 1, 1), executor),
            session,
            false,
            false,
            OptionalInt.empty(),
            Optional.empty(),
            TESTING_SERDE_FACTORY);
    PipelineContext pipelineContext = taskContext.addPipelineContext(0, false, false, false);
    DriverContext driverContext = pipelineContext.addDriverContext();
    return driverContext.addOperatorContext(1, new PlanNodeId("na"), "na");
}
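For context, TestMemoryRevokingScheduler drives revocation through the OperatorContext this helper returns. A minimal sketch of that follow-up step, assuming the standard prestosql memory-context API (the 100-byte figure is illustrative, not taken from the test):

OperatorContext operatorContext = createContexts(sqlTask);
// Reserve revocable memory on the operator; this is the memory the
// MemoryRevokingScheduler is allowed to ask the operator to spill.
operatorContext.localRevocableMemoryContext().setBytes(100);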
Use of io.prestosql.operator.DriverContext in project boostkit-bigdata by kunpengcompute.
From the class OmniLocalQueryRunner, method createDrivers:
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext) {
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(
                plan.getRoot(), plan.getTypes(), metadata, plan.getStatsAndCosts(), session, 0, false));
    }
    SubPlan subplan = planFragmenter.createSubPlans(session, plan, true, WarningCollector.NOOP);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }
    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            metadata,
            new TypeAnalyzer(sqlParser, metadata),
            Optional.empty(),
            pageSourceManager,
            indexManager,
            nodePartitioningManager,
            pageSinkManager,
            null,
            expressionCompiler,
            pageFunctionCompiler,
            joinFilterFunctionCompiler,
            new IndexJoinLookupStats(),
            this.taskManagerConfig,
            spillerFactory,
            singleStreamSpillerFactory,
            partitioningSpillerFactory,
            new PagesIndex.TestingFactory(false),
            joinCompiler,
            new LookupJoinOperators(),
            new OrderingCompiler(),
            nodeInfo,
            stateStoreProvider,
            new StateStoreListenerManager(stateStoreProvider),
            new DynamicFilterCacheManager(),
            heuristicIndexerManager,
            cubeManager);
    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext,
            stageExecutionDescriptor,
            subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(),
            plan.getTypes(),
            subplan.getFragment().getPartitionedSources(),
            null,
            outputFactory,
            Optional.empty(),
            Optional.empty(),
            null);
    // generate sources
    List<TaskSource> sources = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(
                session,
                table,
                stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING,
                null,
                Optional.empty(),
                Collections.emptyMap(),
                ImmutableSet.of(),
                tableScan.getStrategy() != ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
                tableScan.getId());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        sources.add(new TaskSource(tableScan.getId(), scheduledSplits.build(), true));
    }
    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext
                        .addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false)
                        .addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }
    // add sources to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (TaskSource source : sources) {
        DriverFactory driverFactory = driverFactoriesBySource.get(source.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : source.getSplits()) {
            DriverContext driverContext = taskContext
                    .addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned)
                    .addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSource(new TaskSource(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
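The drivers returned here still have to be pumped to completion by the caller. A minimal sketch of that loop, modeled on LocalQueryRunner's execution loop (an assumption about the surrounding harness, not part of the method above):

List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
boolean processed;
do {
    processed = false;
    for (Driver driver : drivers) {
        if (!driver.isFinished()) {
            driver.process(); // advance this driver's operator pipeline by one quantum
            processed = true;
        }
    }
}
while (processed);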
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
From the class TestSpatialJoinOperator, method testDistributedSpatialSelfJoin:
@Test
public void testDistributedSpatialSelfJoin() {
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, true).addDriverContext();
    RowPagesBuilder pages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR, INTEGER))
            .row(POLYGON_A, "A", 1)
            .row(POLYGON_A, "A", 2)
            .row(null, "null", null)
            .pageBreak()
            .row(POLYGON_B, "B", 0)
            .row(POLYGON_B, "B", 2);
    MaterializedResult expected = resultBuilder(taskContext.getSession(), ImmutableList.of(VARCHAR, VARCHAR))
            .row("A", "A")
            .row("A", "B")
            .row("B", "A")
            .row("B", "B")
            .build();
    PagesSpatialIndexFactory pagesSpatialIndexFactory = buildIndex(
            driverContext,
            (build, probe, r) -> build.intersects(probe),
            Optional.empty(),
            Optional.of(2),
            Optional.of(KDB_TREE_JSON),
            Optional.empty(),
            pages);
    OperatorFactory joinOperatorFactory = new SpatialJoinOperatorFactory(
            2, new PlanNodeId("test"), INNER, pages.getTypes(), Ints.asList(1), 0, Optional.of(2), pagesSpatialIndexFactory);
    assertOperatorEquals(joinOperatorFactory, driverContext, pages.build(), expected);
}
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
From the class TestSpatialJoinOperator, method assertSpatialJoin:
private void assertSpatialJoin(TaskContext taskContext, Type joinType, RowPagesBuilder buildPages, RowPagesBuilder probePages, MaterializedResult expected) {
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    PagesSpatialIndexFactory pagesSpatialIndexFactory = buildIndex(
            driverContext,
            (build, probe, r) -> build.contains(probe),
            Optional.empty(),
            Optional.empty(),
            buildPages);
    OperatorFactory joinOperatorFactory = new SpatialJoinOperatorFactory(
            2, new PlanNodeId("test"), joinType, probePages.getTypes(), Ints.asList(1), 0, Optional.empty(), pagesSpatialIndexFactory);
    assertOperatorEquals(joinOperatorFactory, driverContext, probePages.build(), expected);
}
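A hypothetical caller, to show how this helper is meant to be used; the row values are illustrative rather than copied from the test class, and stPoint(1, 1) is assumed to fall inside POLYGON_A, consistent with the point layout in testYield below:

TaskContext taskContext = createTaskContext();
RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
        .row(POLYGON_A, "A")
        .row(POLYGON_B, "B");
RowPagesBuilder probePages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
        .row(stPoint(1, 1), "x");
// expected rows are (probe name, build name) pairs
MaterializedResult expected = resultBuilder(taskContext.getSession(), ImmutableList.of(VARCHAR, VARCHAR))
        .row("x", "A")
        .build();
assertSpatialJoin(taskContext, INNER, buildPages, probePages, expected);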
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
From the class TestSpatialJoinOperator, method testYield:
@Test
public void testYield() {
    // create a filter function that yields for every probe match
    // verify we will yield #match times in total
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    // force a yield for every match
    AtomicInteger filterFunctionCalls = new AtomicInteger();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction((leftPosition, leftPage, rightPosition, rightPage) -> {
        filterFunctionCalls.incrementAndGet();
        driverContext.getYieldSignal().forceYieldForTesting();
        return true;
    });
    RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR))
            .row(POLYGON_A, "A")
            .pageBreak()
            .row(POLYGON_B, "B");
    PagesSpatialIndexFactory pagesSpatialIndexFactory = buildIndex(
            driverContext,
            (build, probe, r) -> build.contains(probe),
            Optional.empty(),
            Optional.of(filterFunction),
            buildPages);
    // 10 points in polygon A (x0...x9)
    // 10 points in polygons A and B (y0...y9)
    // 10 points in polygon B (z0...z9)
    // 40 total matches
    RowPagesBuilder probePages = rowPagesBuilder(ImmutableList.of(GEOMETRY, VARCHAR));
    for (int i = 0; i < 10; i++) {
        probePages.row(stPoint(1 + 0.1 * i, 1 + 0.1 * i), "x" + i);
    }
    for (int i = 0; i < 10; i++) {
        probePages.row(stPoint(4.5 + 0.01 * i, 4.5 + 0.01 * i), "y" + i);
    }
    for (int i = 0; i < 10; i++) {
        probePages.row(stPoint(6 + 0.1 * i, 6 + 0.1 * i), "z" + i);
    }
    List<Page> probeInput = probePages.build();
    OperatorFactory joinOperatorFactory = new SpatialJoinOperatorFactory(
            2, new PlanNodeId("test"), INNER, probePages.getTypes(), Ints.asList(1), 0, Optional.empty(), pagesSpatialIndexFactory);
    Operator operator = joinOperatorFactory.createOperator(driverContext);
    assertTrue(operator.needsInput());
    operator.addInput(probeInput.get(0));
    operator.finish();
    // we will yield 40 times due to filterFunction
    for (int i = 0; i < 40; i++) {
        driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
        assertNull(operator.getOutput());
        assertEquals(filterFunctionCalls.get(), i + 1, "Expected join to stop processing (yield) after calling filter function once");
        driverContext.getYieldSignal().reset();
    }
    // a delayed yield will not prevent the operator from producing a page now: the filter
    // function is no longer called, so a yield is never forced
    driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
    Page output = operator.getOutput();
    assertNotNull(output);
    // make sure we have 40 matches
    assertEquals(output.getPositionCount(), 40);
}
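The yield handshake this test relies on can be condensed into a short sketch; it is hedged on the DriverYieldSignal calls already used above plus isSet(), which is what operators poll internally:

DriverYieldSignal yieldSignal = driverContext.getYieldSignal();
yieldSignal.forceYieldForTesting();      // raise the signal immediately
boolean mustYield = yieldSignal.isSet(); // operators check this and return null from getOutput() while it is true
yieldSignal.reset();                     // clear the signal so the next getOutput() call can make progress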