use of io.prestosql.operator.Driver in project hetu-core by openlookeng.
The class AbstractOperatorBenchmark, method execute:
protected Map<String, Long> execute(TaskContext taskContext)
{
    List<Driver> drivers = createDrivers(taskContext);

    long peakMemory = 0;
    boolean done = false;
    while (!done) {
        boolean processed = false;
        for (Driver driver : drivers) {
            if (!driver.isFinished()) {
                driver.process();
                // Track the high-water mark of user memory across iterations
                // (i.e. peakMemory = max(peakMemory, current reservation))
                long lastPeakMemory = peakMemory;
                peakMemory = (long) taskContext.getTaskStats().getUserMemoryReservation().getValue(BYTE);
                if (peakMemory <= lastPeakMemory) {
                    peakMemory = lastPeakMemory;
                }
                processed = true;
            }
        }
        // Done once a full pass over all drivers makes no progress
        done = !processed;
    }
    return ImmutableMap.of("peak_memory", peakMemory);
}
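The loop above is a round-robin scheduler in miniature: each pass calls process() on every unfinished driver, and the benchmark is done once a full pass makes no progress. Below is a minimal, self-contained sketch of the same termination pattern; the Task interface is an illustrative stand-in, not the real io.prestosql.operator.Driver API.

import java.util.List;

public class RoundRobinLoop
{
    // Stand-in for Driver: anything that can make incremental progress.
    interface Task
    {
        boolean isFinished();

        void process();
    }

    static void driveToCompletion(List<Task> tasks)
    {
        boolean done = false;
        while (!done) {
            boolean processed = false;
            for (Task task : tasks) {
                if (!task.isFinished()) {
                    task.process();
                    processed = true;
                }
            }
            // A pass with no progress means every task reported finished.
            done = !processed;
        }
    }

    public static void main(String[] args)
    {
        // Two toy tasks that finish after a fixed number of process() calls.
        List<Task> tasks = List.of(countdownTask(3), countdownTask(5));
        driveToCompletion(tasks);
        System.out.println("all tasks finished");
    }

    private static Task countdownTask(int steps)
    {
        int[] remaining = {steps};
        return new Task()
        {
            @Override
            public boolean isFinished()
            {
                return remaining[0] == 0;
            }

            @Override
            public void process()
            {
                remaining[0]--;
            }
        };
    }
}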
use of io.prestosql.operator.Driver in project hetu-core by openlookeng.
The class AbstractSimpleOperatorBenchmark, method createDrivers:
@Override
protected List<Driver> createDrivers(TaskContext taskContext)
{
    DriverFactory driverFactory = createDriverFactory();
    // One pipeline (id 0) that is both the input and the output pipeline, not partitioned
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    Driver driver = driverFactory.createDriver(driverContext);
    return ImmutableList.of(driver);
}
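AbstractSimpleOperatorBenchmark covers the common single-pipeline case: a subclass supplies the chain of operator factories and the base class wraps them into the one DriverFactory used above. A hypothetical subclass might look like the sketch below; createOperatorFactories, createTableScanOperator, and the constructor arguments are assumptions about the base classes rather than facts from this page, and the sketch assumes createDriverFactory() terminates the pipeline with an output sink.

public class ScanOnlyBenchmark
        extends AbstractSimpleOperatorBenchmark
{
    protected ScanOnlyBenchmark(LocalQueryRunner localQueryRunner)
    {
        // Hypothetical arguments: runner, benchmark name, warm-up and
        // measured iteration counts.
        super(localQueryRunner, "scan_only", 5, 20);
    }

    @Override
    protected List<? extends OperatorFactory> createOperatorFactories()
    {
        // One table-scan operator; createTableScanOperator is assumed to be
        // a helper on AbstractOperatorBenchmark.
        return ImmutableList.of(
                createTableScanOperator(0, new PlanNodeId("test"), "orders", "orderkey", "totalprice"));
    }
}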
use of io.prestosql.operator.Driver in project hetu-core by openlookeng.
The class HashBuildAndJoinBenchmark, method createDrivers:
/*
select orderkey, quantity, totalprice
from lineitem join orders using (orderkey)
*/
@Override
protected List<Driver> createDrivers(TaskContext taskContext)
{
    ImmutableList.Builder<OperatorFactory> driversBuilder = ImmutableList.builder();
    driversBuilder.add(ordersTableScan);
    List<Type> sourceTypes = ordersTableTypes;
    OptionalInt hashChannel = OptionalInt.empty();
    if (hashEnabled) {
        // Precompute the hash as an extra BIGINT channel appended to the row
        driversBuilder.add(createHashProjectOperator(1, new PlanNodeId("test"), sourceTypes));
        sourceTypes = ImmutableList.<Type>builder()
                .addAll(sourceTypes)
                .add(BIGINT)
                .build();
        hashChannel = OptionalInt.of(sourceTypes.size() - 1);
    }

    // hash build
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactoryManager = JoinBridgeManager.lookupAllAtOnce(new PartitionedLookupSourceFactory(
            sourceTypes,
            ImmutableList.of(0, 1).stream().map(sourceTypes::get).collect(toImmutableList()),
            Ints.asList(0).stream().map(sourceTypes::get).collect(toImmutableList()),
            1,
            requireNonNull(ImmutableMap.of(), "layout is null"),
            false,
            false));
    HashBuilderOperatorFactory hashBuilder = new HashBuilderOperatorFactory(
            2,
            new PlanNodeId("test"),
            lookupSourceFactoryManager,
            ImmutableList.of(0, 1),
            Ints.asList(0),
            hashChannel,
            Optional.empty(),
            Optional.empty(),
            ImmutableList.of(),
            1_500_000,
            new PagesIndex.TestingFactory(false),
            false,
            SingleStreamSpillerFactory.unsupportedSingleStreamSpillerFactory());
    driversBuilder.add(hashBuilder);
    DriverFactory hashBuildDriverFactory = new DriverFactory(0, true, false, driversBuilder.build(), OptionalInt.empty(), UNGROUPED_EXECUTION);

    // join
    ImmutableList.Builder<OperatorFactory> joinDriversBuilder = ImmutableList.builder();
    joinDriversBuilder.add(lineItemTableScan);
    sourceTypes = lineItemTableTypes;
    hashChannel = OptionalInt.empty();
    if (hashEnabled) {
        joinDriversBuilder.add(createHashProjectOperator(1, new PlanNodeId("test"), sourceTypes));
        sourceTypes = ImmutableList.<Type>builder()
                .addAll(sourceTypes)
                .add(BIGINT)
                .build();
        hashChannel = OptionalInt.of(sourceTypes.size() - 1);
    }
    OperatorFactory joinOperator = LOOKUP_JOIN_OPERATORS.innerJoin(
            2,
            new PlanNodeId("test"),
            lookupSourceFactoryManager,
            sourceTypes,
            Ints.asList(0),
            hashChannel,
            Optional.empty(),
            OptionalInt.empty(),
            unsupportedPartitioningSpillerFactory());
    joinDriversBuilder.add(joinOperator);
    joinDriversBuilder.add(new NullOutputOperatorFactory(3, new PlanNodeId("test")));
    DriverFactory joinDriverFactory = new DriverFactory(1, true, true, joinDriversBuilder.build(), OptionalInt.empty(), UNGROUPED_EXECUTION);

    // Build pipeline 0 (input only) feeds the lookup source consumed by
    // join pipeline 1 (input and output)
    Driver hashBuildDriver = hashBuildDriverFactory.createDriver(taskContext.addPipelineContext(0, true, false, false).addDriverContext());
    hashBuildDriverFactory.noMoreDrivers();
    Driver joinDriver = joinDriverFactory.createDriver(taskContext.addPipelineContext(1, true, true, false).addDriverContext());
    joinDriverFactory.noMoreDrivers();
    return ImmutableList.of(hashBuildDriver, joinDriver);
}
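These two drivers implement the classic build/probe split of a hash join: the build pipeline must consume all of orders and publish the lookup source before the probe pipeline can stream lineitem rows past it. A self-contained sketch of that pattern using plain Java collections follows; the row shapes are invented for illustration, and this shows the technique rather than the Presto operators.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BuildProbeJoinSketch
{
    record Order(long orderKey, double totalPrice) {}

    record LineItem(long orderKey, long quantity) {}

    public static void main(String[] args)
    {
        List<Order> orders = List.of(new Order(1, 100.0), new Order(2, 50.0));
        List<LineItem> lineItems = List.of(new LineItem(1, 3), new LineItem(2, 7), new LineItem(9, 1));

        // Build phase: index the build side by join key. This must run to
        // completion before any probe output can be produced, which is why
        // the benchmark gives the build its own pipeline.
        Map<Long, Order> lookup = new HashMap<>();
        for (Order order : orders) {
            lookup.put(order.orderKey(), order);
        }

        // Probe phase: stream the other side and emit matches.
        List<String> result = new ArrayList<>();
        for (LineItem item : lineItems) {
            Order match = lookup.get(item.orderKey());
            if (match != null) { // inner join: drop unmatched probe rows
                result.add(item.orderKey() + ", " + item.quantity() + ", " + match.totalPrice());
            }
        }
        result.forEach(System.out::println);
    }
}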
use of io.prestosql.operator.Driver in project hetu-core by openlookeng.
The class TestMemoryPools, method setupConsumeRevocableMemory:
private RevocableMemoryOperator setupConsumeRevocableMemory(DataSize reservedPerPage, long numberOfPages)
{
    AtomicReference<RevocableMemoryOperator> createOperator = new AtomicReference<>();
    setUp(() -> {
        DriverContext driverContext = taskContext.addPipelineContext(0, false, false, false).addDriverContext();
        OperatorContext revokableOperatorContext = driverContext.addOperatorContext(
                Integer.MAX_VALUE,
                new PlanNodeId("revokable_operator"),
                TableScanOperator.class.getSimpleName());

        // Sink that simply discards every page it receives
        OutputFactory outputFactory = new PageConsumerOutputFactory(types -> (page -> {
        }));
        Operator outputOperator = outputFactory.createOutputOperator(
                2,
                new PlanNodeId("output"),
                ImmutableList.of(),
                Function.identity(),
                driverContext.getPipelineContext().getTaskContext())
                .createOperator(driverContext);

        RevocableMemoryOperator revocableMemoryOperator = new RevocableMemoryOperator(revokableOperatorContext, reservedPerPage, numberOfPages);
        createOperator.set(revocableMemoryOperator);
        Driver driver = Driver.createDriver(driverContext, revocableMemoryOperator, outputOperator);
        return ImmutableList.of(driver);
    });
    return createOperator.get();
}
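RevocableMemoryOperator exists to exercise memory revocation: a revocable reservation is memory the pool may ask an operator to hand back, typically by spilling state to disk. The toy model below illustrates that contract with invented names; it is not the Presto MemoryPool API.

import java.util.LinkedHashMap;
import java.util.Map;

public class RevocableMemorySketch
{
    // Toy stand-in for an operator holding revocable memory; revoke()
    // models spilling state and releasing the reservation.
    interface Revocable
    {
        void revoke();
    }

    static class ToyPool
    {
        private final long maxBytes;
        private long reservedBytes;
        private final Map<Revocable, Long> revocable = new LinkedHashMap<>();

        ToyPool(long maxBytes)
        {
            this.maxBytes = maxBytes;
        }

        void reserveRevocable(Revocable holder, long bytes)
        {
            // When the pool would overflow, revoke earlier holders first.
            var victims = revocable.entrySet().iterator();
            while (reservedBytes + bytes > maxBytes && victims.hasNext()) {
                Map.Entry<Revocable, Long> victim = victims.next();
                victim.getKey().revoke();
                reservedBytes -= victim.getValue();
                victims.remove();
            }
            revocable.merge(holder, bytes, Long::sum);
            reservedBytes += bytes;
        }
    }

    public static void main(String[] args)
    {
        ToyPool pool = new ToyPool(100);
        Revocable first = () -> System.out.println("first spilled");
        pool.reserveRevocable(first, 80);
        // This reservation exceeds the pool, so "first" is asked to revoke.
        pool.reserveRevocable(() -> System.out.println("second spilled"), 60);
    }
}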
use of io.prestosql.operator.Driver in project hetu-core by openlookeng.
The class LocalQueryRunner, method createDrivers:
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext)
{
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(plan.getRoot(), plan.getTypes(), metadata, plan.getStatsAndCosts(), session, 0, false));
    }

    SubPlan subplan = planFragmenter.createSubPlans(session, plan, true, WarningCollector.NOOP);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }

    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            metadata,
            new TypeAnalyzer(sqlParser, metadata),
            Optional.empty(),
            pageSourceManager,
            indexManager,
            nodePartitioningManager,
            pageSinkManager,
            null,
            expressionCompiler,
            pageFunctionCompiler,
            joinFilterFunctionCompiler,
            new IndexJoinLookupStats(),
            this.taskManagerConfig,
            spillerFactory,
            singleStreamSpillerFactory,
            partitioningSpillerFactory,
            new PagesIndex.TestingFactory(false),
            joinCompiler,
            new LookupJoinOperators(),
            new OrderingCompiler(),
            nodeInfo,
            stateStoreProvider,
            new StateStoreListenerManager(stateStoreProvider),
            new DynamicFilterCacheManager(),
            heuristicIndexerManager,
            cubeManager);

    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext,
            stageExecutionDescriptor,
            subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(),
            plan.getTypes(),
            subplan.getFragment().getPartitionedSources(),
            null,
            outputFactory,
            Optional.empty(),
            Optional.empty(),
            null);

    // generate sources
    List<TaskSource> sources = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(
                session,
                table,
                stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING,
                null,
                Optional.empty(),
                Collections.emptyMap(),
                ImmutableSet.of(),
                tableScan.getStrategy() != ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
                tableScan.getId());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        sources.add(new TaskSource(tableScan.getId(), scheduledSplits.build(), true));
    }

    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false).addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }

    // add sources to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (TaskSource source : sources) {
        DriverFactory driverFactory = driverFactoriesBySource.get(source.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : source.getSplits()) {
            DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned).addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSource(new TaskSource(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }

    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
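Once created, these drivers are executed the same way as in the benchmark loop at the top of this page: call process() on each until none makes progress. A hypothetical helper inside LocalQueryRunner, shown only to close the loop (the runner's real execution path is not shown on this page):

private void runToCompletion(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext)
{
    List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
    boolean done = false;
    while (!done) {
        boolean processed = false;
        for (Driver driver : drivers) {
            if (!driver.isFinished()) {
                driver.process();
                processed = true;
            }
        }
        done = !processed;
    }
}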