Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
Example from the class TestQueryContext, method testMoveTaggedAllocations.
@Test
public void testMoveTaggedAllocations() {
    MemoryPool generalPool = new MemoryPool(GENERAL_POOL, new DataSize(10_000, BYTE));
    MemoryPool reservedPool = new MemoryPool(RESERVED_POOL, new DataSize(10_000, BYTE));
    QueryId queryId = new QueryId("query");
    QueryContext queryContext = createQueryContext(queryId, generalPool);
    TaskStateMachine taskStateMachine = new TaskStateMachine(TaskId.valueOf("task-id"), TEST_EXECUTOR);
    TaskContext taskContext = queryContext.addTaskContext(taskStateMachine, TEST_SESSION, false, false, OptionalInt.empty(), Optional.empty(), TESTING_SERDE_FACTORY);
    DriverContext driverContext = taskContext.addPipelineContext(0, false, false, false).addDriverContext();
    OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), "test");

    // allocate some memory in the general pool
    LocalMemoryContext memoryContext = operatorContext.aggregateUserMemoryContext().newLocalMemoryContext("test_context");
    memoryContext.setBytes(1_000);
    Map<String, Long> allocations = generalPool.getTaggedMemoryAllocations().get(queryId);
    assertEquals(allocations, ImmutableMap.of("test_context", 1_000L));

    // moving the query to the reserved pool should move the tagged allocations with it
    queryContext.setMemoryPool(reservedPool);
    assertNull(generalPool.getTaggedMemoryAllocations().get(queryId));
    allocations = reservedPool.getTaggedMemoryAllocations().get(queryId);
    assertEquals(allocations, ImmutableMap.of("test_context", 1_000L));
    assertEquals(generalPool.getFreeBytes(), 10_000);
    assertEquals(reservedPool.getFreeBytes(), 9_000);

    // closing the memory context frees the reservation in the current pool
    memoryContext.close();
    assertEquals(generalPool.getFreeBytes(), 10_000);
    assertEquals(reservedPool.getFreeBytes(), 10_000);
}
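The createQueryContext helper is not part of this excerpt. A minimal sketch of what such a factory might look like, assuming the QueryContext constructor shape used in upstream Presto tests; the exact signature in hetu-core, the executor fields, and the limits chosen here are assumptions:

private static final ScheduledExecutorService SCHEDULED_EXECUTOR = newScheduledThreadPool(1, daemonThreadsNamed("test-scheduler-%s"));

private static QueryContext createQueryContext(QueryId queryId, MemoryPool memoryPool) {
    // Assumed constructor shape: memory limits match the pool size, spill is disabled.
    return new QueryContext(
            queryId,
            new DataSize(10_000, BYTE),    // max user memory (assumed)
            new DataSize(10_000, BYTE),    // max total memory (assumed)
            memoryPool,
            new TestingGcMonitor(),
            TEST_EXECUTOR,
            SCHEDULED_EXECUTOR,            // hypothetical yield executor
            new DataSize(0, BYTE),         // max spill size (assumed)
            new SpillSpaceTracker(new DataSize(0, BYTE)));
}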
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
Example from the class AbstractOperatorBenchmark, method createTableScanOperator.
protected final OperatorFactory createTableScanOperator(int operatorId, PlanNodeId planNodeId, String tableName, String... columnNames) {
    checkArgument(session.getCatalog().isPresent(), "catalog not set");
    checkArgument(session.getSchema().isPresent(), "schema not set");

    // look up the table
    Metadata metadata = localQueryRunner.getMetadata();
    QualifiedObjectName qualifiedTableName = new QualifiedObjectName(session.getCatalog().get(), session.getSchema().get(), tableName);
    TableHandle tableHandle = metadata.getTableHandle(session, qualifiedTableName).orElse(null);
    checkArgument(tableHandle != null, "Table %s does not exist", qualifiedTableName);

    // look up the columns
    Map<String, ColumnHandle> allColumnHandles = metadata.getColumnHandles(session, tableHandle);
    ImmutableList.Builder<ColumnHandle> columnHandlesBuilder = ImmutableList.builder();
    for (String columnName : columnNames) {
        ColumnHandle columnHandle = allColumnHandles.get(columnName);
        checkArgument(columnHandle != null, "Table %s does not have a column %s", tableName, columnName);
        columnHandlesBuilder.add(columnHandle);
    }
    List<ColumnHandle> columnHandles = columnHandlesBuilder.build();

    // get the split for this table
    Split split = getLocalQuerySplit(session, tableHandle, planNodeId);

    return new OperatorFactory() {
        @Override
        public Operator createOperator(DriverContext driverContext) {
            OperatorContext operatorContext = driverContext.addOperatorContext(operatorId, planNodeId, "BenchmarkSource");
            ConnectorPageSource pageSource = localQueryRunner.getPageSourceManager().createPageSource(session, split, tableHandle, columnHandles, Optional.empty());
            return new PageSourceOperator(pageSource, operatorContext);
        }

        @Override
        public void noMoreOperators() {
        }

        @Override
        public OperatorFactory duplicate() {
            throw new UnsupportedOperationException();
        }
    };
}
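A benchmark would obtain a DriverContext from its TaskContext and hand it to the returned factory. A hypothetical usage sketch, using only the APIs visible above; the table and column names are placeholders:

OperatorFactory scanFactory = createTableScanOperator(0, new PlanNodeId("scan"), "orders", "orderkey", "totalprice");
DriverContext driverContext = taskContext.addPipelineContext(0, true, false, false).addDriverContext();
Operator scanOperator = scanFactory.createOperator(driverContext);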
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
Example from the class AbstractSimpleOperatorBenchmark, method createDrivers.
@Override
protected List<Driver> createDrivers(TaskContext taskContext) {
    DriverFactory driverFactory = createDriverFactory();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    Driver driver = driverFactory.createDriver(driverContext);
    return ImmutableList.of(driver);
}
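createDriverFactory is defined elsewhere in the class. A sketch of what it typically does, assuming the DriverFactory constructor shape from upstream Presto and a page-discarding NullOutputOperatorFactory sink, both of which may differ in hetu-core:

protected DriverFactory createDriverFactory() {
    // Chain the benchmark's operators and terminate the pipeline with a no-op sink (assumed).
    List<OperatorFactory> operatorFactories = new ArrayList<>(createOperatorFactories());
    operatorFactories.add(new NullOutputOperatorFactory(999, new PlanNodeId("test")));
    return new DriverFactory(0, true, true, operatorFactories, OptionalInt.empty(), UNGROUPED_EXECUTION);
}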
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
Example from the class TestMemoryPools, method setupConsumeRevocableMemory.
private RevocableMemoryOperator setupConsumeRevocableMemory(DataSize reservedPerPage, long numberOfPages) {
    AtomicReference<RevocableMemoryOperator> createOperator = new AtomicReference<>();
    setUp(() -> {
        DriverContext driverContext = taskContext.addPipelineContext(0, false, false, false).addDriverContext();
        OperatorContext revokableOperatorContext = driverContext.addOperatorContext(
                Integer.MAX_VALUE,
                new PlanNodeId("revokable_operator"),
                TableScanOperator.class.getSimpleName());

        // output operator that simply discards every page it receives
        OutputFactory outputFactory = new PageConsumerOutputFactory(types -> (page -> {
        }));
        Operator outputOperator = outputFactory.createOutputOperator(2, new PlanNodeId("output"), ImmutableList.of(), Function.identity(), driverContext.getPipelineContext().getTaskContext()).createOperator(driverContext);

        RevocableMemoryOperator revocableMemoryOperator = new RevocableMemoryOperator(revokableOperatorContext, reservedPerPage, numberOfPages);
        createOperator.set(revocableMemoryOperator);

        Driver driver = Driver.createDriver(driverContext, revocableMemoryOperator, outputOperator);
        return ImmutableList.of(driver);
    });
    return createOperator.get();
}
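Inside RevocableMemoryOperator, the revocable reservation is made through the OperatorContext. A minimal sketch of that pattern; the operator's actual implementation is not shown in this excerpt:

// Reserve revocable memory; the engine may later ask the operator to spill and release it.
LocalMemoryContext revocableContext = operatorContext.localRevocableMemoryContext();
revocableContext.setBytes(reservedPerPage.toBytes() * numberOfPages);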
Use of io.prestosql.operator.DriverContext in project hetu-core by openlookeng.
Example from the class LocalQueryRunner, method createDrivers.
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext) {
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(plan.getRoot(), plan.getTypes(), metadata, plan.getStatsAndCosts(), session, 0, false));
    }

    SubPlan subplan = planFragmenter.createSubPlans(session, plan, true, WarningCollector.NOOP);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }

    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            metadata,
            new TypeAnalyzer(sqlParser, metadata),
            Optional.empty(),
            pageSourceManager,
            indexManager,
            nodePartitioningManager,
            pageSinkManager,
            null,
            expressionCompiler,
            pageFunctionCompiler,
            joinFilterFunctionCompiler,
            new IndexJoinLookupStats(),
            this.taskManagerConfig,
            spillerFactory,
            singleStreamSpillerFactory,
            partitioningSpillerFactory,
            new PagesIndex.TestingFactory(false),
            joinCompiler,
            new LookupJoinOperators(),
            new OrderingCompiler(),
            nodeInfo,
            stateStoreProvider,
            new StateStoreListenerManager(stateStoreProvider),
            new DynamicFilterCacheManager(),
            heuristicIndexerManager,
            cubeManager);

    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext,
            stageExecutionDescriptor,
            subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(),
            plan.getTypes(),
            subplan.getFragment().getPartitionedSources(),
            null,
            outputFactory,
            Optional.empty(),
            Optional.empty(),
            null);

    // generate sources
    List<TaskSource> sources = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(
                session,
                table,
                stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING,
                null,
                Optional.empty(),
                Collections.emptyMap(),
                ImmutableSet.of(),
                tableScan.getStrategy() != ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
                tableScan.getId());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        sources.add(new TaskSource(tableScan.getId(), scheduledSplits.build(), true));
    }

    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false).addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }

    // add sources to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (TaskSource source : sources) {
        DriverFactory driverFactory = driverFactoriesBySource.get(source.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : source.getSplits()) {
            DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned).addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSource(new TaskSource(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }

    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
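LocalQueryRunner then runs the returned drivers to completion. Simplified from the processing loop in LocalQueryRunner; the structure below is a sketch rather than the exact upstream code:

boolean done = false;
while (!done) {
    boolean processed = false;
    for (Driver driver : drivers) {
        if (!driver.isFinished()) {
            driver.process(); // advance the driver; it yields when blocked
            processed = true;
        }
    }
    // stop once a full pass over the drivers makes no progress
    done = !processed;
}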