Example usage of io.trino.operator.Driver in the Trino project (trinodb/trino):
method addSplitAssignments of class SqlTaskExecution.
// Registers newly arrived split assignments with this task and forwards any
// unpartitioned-source updates to every live driver, then re-checks completion.
public void addSplitAssignments(List<SplitAssignment> splitAssignments) {
requireNonNull(splitAssignments, "splitAssignments is null");
checkState(!Thread.holdsLock(this), "Cannot add split assignments while holding a lock on the %s", getClass().getSimpleName());
try (SetThreadName ignored = new SetThreadName("Task-%s", taskId)) {
// record the assignments and schedule drivers for new partitioned splits;
// the returned map holds the unpartitioned sources that still need delivery
Map<PlanNodeId, SplitAssignment> unpartitionedUpdates = updateSplitAssignments(splitAssignments);
for (WeakReference<Driver> reference : drivers) {
Driver liveDriver = reference.get();
if (liveDriver == null) {
// a driver can be GCed after a failure or a limit was reached;
// drop the stale weak reference to avoid a memory leak
// (removal while iterating is safe on a CopyOnWriteArrayList)
drivers.remove(reference);
continue;
}
// deliver the matching unpartitioned-source update, if the driver has a
// source and an update for that source exists
liveDriver.getSourceId()
.map(unpartitionedUpdates::get)
.ifPresent(liveDriver::updateSplitAssignment);
}
// we may have transitioned to "no more splits", so check for completion
checkTaskCompletion();
}
}
Example usage of io.trino.operator.Driver in the Trino project (trinodb/trino):
method executeInternal of class LocalQueryRunner.
// Plans and executes the given SQL entirely in-process, driving every driver to
// completion and materializing the output pages into a MaterializedResult.
private MaterializedResultWithPlan executeInternal(Session session, @Language("SQL") String sql) {
lock.readLock().lock();
try (Closer closer = Closer.create()) {
accessControl.checkCanExecuteQuery(session.getIdentity());
// the result builder is created lazily, once the output types become known
AtomicReference<MaterializedResult.Builder> resultBuilder = new AtomicReference<>();
PageConsumerOutputFactory outputFactory = new PageConsumerOutputFactory(types -> {
resultBuilder.compareAndSet(null, MaterializedResult.resultBuilder(session, types));
return resultBuilder.get()::page;
});
TaskContext taskContext = TestingTaskContext.builder(notificationExecutor, yieldExecutor, session)
.setMaxSpillSize(nodeSpillConfig.getMaxSpillPerNode())
.setQueryMaxSpillSize(nodeSpillConfig.getQueryMaxSpillPerNode())
.build();
Plan plan = createPlan(session, sql, WarningCollector.NOOP);
List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
drivers.forEach(closer::register);
// round-robin the drivers until a full pass makes no progress
boolean madeProgress;
do {
madeProgress = false;
for (Driver driver : drivers) {
if (alwaysRevokeMemory) {
// ask every operator currently holding revocable memory to spill
driver.getDriverContext().getOperatorContexts().stream()
.filter(operatorContext -> operatorContext.getNestedOperatorStats().stream()
.mapToLong(stats -> stats.getRevocableMemoryReservation().toBytes())
.sum() > 0)
.forEach(OperatorContext::requestMemoryRevoking);
}
if (!driver.isFinished()) {
driver.process();
madeProgress = true;
}
}
}
while (madeProgress);
verify(resultBuilder.get() != null, "Output operator was not created");
return new MaterializedResultWithPlan(resultBuilder.get().build(), plan);
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
lock.readLock().unlock();
}
}
Example usage of io.trino.operator.Driver in the Trino project (trinodb/trino):
method testTableScanMemoryBlocking of class TestMemoryBlocking.
@Test
public void testTableScanMemoryBlocking() {
PlanNodeId scanNodeId = new PlanNodeId("source");
List<Type> types = ImmutableList.of(VARCHAR);
// a page source producing five sequence pages of ten VARCHAR rows each
TableScanOperator scan = new TableScanOperator(
driverContext.addOperatorContext(1, new PlanNodeId("test"), "values"),
scanNodeId,
(session, split, table, columns, dynamicFilter) -> new FixedPageSource(rowPagesBuilder(types)
.addSequencePage(10, 1)
.addSequencePage(10, 1)
.addSequencePage(10, 1)
.addSequencePage(10, 1)
.addSequencePage(10, 1)
.build()),
TEST_TABLE_HANDLE,
ImmutableList.of(),
DynamicFilter.EMPTY);
PageConsumerOperator sink = createSinkOperator(types);
Driver driver = Driver.createDriver(driverContext, scan, sink);
assertSame(driver.getDriverContext(), driverContext);
assertFalse(driver.isFinished());
Split testSplit = new Split(new CatalogName("test"), new TestSplit(), Lifespan.taskWide());
driver.updateSplitAssignment(new SplitAssignment(scanNodeId, ImmutableSet.of(new ScheduledSplit(0, scanNodeId, testSplit)), true));
ListenableFuture<Void> blocked = driver.processFor(new Duration(1, NANOSECONDS));
// the first call should not block the driver, since it can still move one page
// from the scan to the sink — but the scan operator should be waiting for memory
assertTrue(blocked.isDone());
assertFalse(scan.getOperatorContext().isWaitingForMemory().isDone());
// both the driver and the operator stay blocked until memory is freed
for (int i = 0; i < 10; i++) {
blocked = driver.processFor(new Duration(1, NANOSECONDS));
assertFalse(blocked.isDone());
assertFalse(scan.getOperatorContext().isWaitingForMemory().isDone());
}
// releasing the reserved memory unblocks the operator ...
memoryPool.free(QUERY_ID, "test", memoryPool.getReservedBytes());
assertTrue(scan.getOperatorContext().isWaitingForMemory().isDone());
// ... and the driver no longer blocks either
blocked = driver.processFor(new Duration(1, NANOSECONDS));
assertTrue(blocked.isDone());
}
Example usage of io.trino.operator.Driver in the Trino project (trinodb/trino):
method innerJoinWithSpill of class TestHashJoinOperator.
// Exercises the spilling inner join: builds a lookup source while optionally forcing
// memory revocation (spill) on each build operator at a configurable point in its
// lifecycle (DURING_BUILD, AFTER_BUILD, or DURING_USAGE, per whenSpill entry), then
// drives the probe side and verifies the join output is unaffected by when spilling
// happened.
//
// NOTE(review): the interleaving of process()/revokeMemory() calls below is
// load-bearing; it relies on forceYieldForTesting() to pause the probe after the
// first matched row so DURING_USAGE spills can be triggered mid-probe.
private void innerJoinWithSpill(boolean probeHashEnabled, List<WhenSpill> whenSpill, SingleStreamSpillerFactory buildSpillerFactory, PartitioningSpillerFactory joinSpillerFactory) throws Exception {
TaskStateMachine taskStateMachine = new TaskStateMachine(new TaskId(new StageId("query", 0), 0, 0), executor);
TaskContext taskContext = TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION, taskStateMachine);
DriverContext joinDriverContext = taskContext.addPipelineContext(2, true, true, false).addDriverContext();
// force a yield for every match in LookupJoinOperator; `called` flips to true after
// the first match so the caller can stop single-stepping the probe
AtomicBoolean called = new AtomicBoolean(false);
InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction((leftPosition, leftPage, rightPosition, rightPage) -> {
called.set(true);
joinDriverContext.getYieldSignal().forceYieldForTesting();
return true;
});
// build factory
RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)).addSequencePage(4, 20, 200).addSequencePage(4, 20, 200).addSequencePage(4, 30, 300).addSequencePage(4, 40, 400);
BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), true, buildSpillerFactory);
JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager();
// probe factory
RowPagesBuilder probePages = rowPagesBuilder(probeHashEnabled, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)).row("20", 123_000L).row("20", 123_000L).pageBreak().addSequencePage(20, 0, 123_000).addSequencePage(10, 30, 123_000);
OperatorFactory joinOperatorFactory = innerJoinOperatorFactory(operatorFactories, lookupSourceFactoryManager, probePages, joinSpillerFactory, true);
// build drivers and operators; one whenSpill entry is expected per build partition
instantiateBuildDrivers(buildSideSetup, taskContext);
List<Driver> buildDrivers = buildSideSetup.getBuildDrivers();
int buildOperatorCount = buildDrivers.size();
checkState(buildOperatorCount == whenSpill.size());
LookupSourceFactory lookupSourceFactory = lookupSourceFactoryManager.getJoinBridge(Lifespan.taskWide());
try (Operator joinOperator = joinOperatorFactory.createOperator(joinDriverContext)) {
// build lookup source, revoking memory mid-build for DURING_BUILD operators as
// soon as they have revocable memory reserved
ListenableFuture<LookupSourceProvider> lookupSourceProvider = lookupSourceFactory.createLookupSourceProvider();
List<Boolean> revoked = new ArrayList<>(nCopies(buildOperatorCount, false));
while (!lookupSourceProvider.isDone()) {
for (int i = 0; i < buildOperatorCount; i++) {
checkErrors(taskStateMachine);
buildDrivers.get(i).process();
HashBuilderOperator buildOperator = buildSideSetup.getBuildOperators().get(i);
if (whenSpill.get(i) == WhenSpill.DURING_BUILD && buildOperator.getOperatorContext().getReservedRevocableBytes() > 0) {
checkState(!lookupSourceProvider.isDone(), "Too late, LookupSource already done");
revokeMemory(buildOperator);
revoked.set(i, true);
}
}
}
getFutureValue(lookupSourceProvider).close();
// every DURING_BUILD operator must have spilled before the lookup source finished
assertEquals(revoked, whenSpill.stream().map(WhenSpill.DURING_BUILD::equals).collect(toImmutableList()), "Some operators not spilled before LookupSource built");
for (int i = 0; i < buildOperatorCount; i++) {
if (whenSpill.get(i) == WhenSpill.AFTER_BUILD) {
revokeMemory(buildSideSetup.getBuildOperators().get(i));
}
}
// let the build drivers finish asynchronously while the probe runs
for (Driver buildDriver : buildDrivers) {
runDriverInThread(executor, buildDriver);
}
ValuesOperatorFactory valuesOperatorFactory = new ValuesOperatorFactory(17, new PlanNodeId("values"), probePages.build());
PageBuffer pageBuffer = new PageBuffer(10);
PageBufferOperatorFactory pageBufferOperatorFactory = new PageBufferOperatorFactory(18, new PlanNodeId("pageBuffer"), pageBuffer);
Driver joinDriver = Driver.createDriver(joinDriverContext, valuesOperatorFactory.createOperator(joinDriverContext), joinOperator, pageBufferOperatorFactory.createOperator(joinDriverContext));
// single-step the probe until the first match, so DURING_USAGE spills happen mid-probe
while (!called.get()) {
// process first row of first page of LookupJoinOperator
processRow(joinDriver, taskStateMachine);
}
for (int i = 0; i < buildOperatorCount; i++) {
if (whenSpill.get(i) == WhenSpill.DURING_USAGE) {
triggerMemoryRevokingAndWait(buildSideSetup.getBuildOperators().get(i), taskStateMachine);
}
}
// process remaining LookupJoinOperator pages
while (!joinDriver.isFinished()) {
checkErrors(taskStateMachine);
processRow(joinDriver, taskStateMachine);
}
checkErrors(taskStateMachine);
// verify the join output matches regardless of spill timing (order-insensitive)
List<Page> actualPages = getPages(pageBuffer);
MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypesWithoutHash(), buildPages.getTypesWithoutHash())).row("20", 123_000L, "20", 200L).row("20", 123_000L, "20", 200L).row("20", 123_000L, "20", 200L).row("20", 123_000L, "20", 200L).row("30", 123_000L, "30", 300L).row("31", 123_001L, "31", 301L).row("32", 123_002L, "32", 302L).row("33", 123_003L, "33", 303L).build();
assertEqualsIgnoreOrder(getProperColumns(joinOperator, concat(probePages.getTypes(), buildPages.getTypes()), probePages, actualPages).getMaterializedRows(), expected.getMaterializedRows());
} finally {
joinOperatorFactory.noMoreOperators();
}
}
Example usage of io.trino.operator.Driver in the Trino project (trinodb/trino):
method instantiateBuildDrivers of class JoinTestUtils.
// Creates one build driver per build-side partition — each pairing a build-side
// source operator with a HashBuilderOperator — and registers them with the setup.
public static void instantiateBuildDrivers(BuildSideSetup buildSideSetup, TaskContext taskContext) {
PipelineContext buildPipeline = taskContext.addPipelineContext(1, true, true, false);
int partitionCount = buildSideSetup.getPartitionCount();
List<Driver> drivers = new ArrayList<>(partitionCount);
List<HashBuilderOperator> hashBuilders = new ArrayList<>(partitionCount);
for (int partition = 0; partition < partitionCount; partition++) {
DriverContext driverContext = buildPipeline.addDriverContext();
HashBuilderOperator hashBuilder = buildSideSetup.getBuildOperatorFactory().createOperator(driverContext);
drivers.add(Driver.createDriver(
driverContext,
buildSideSetup.getBuildSideSourceOperatorFactory().createOperator(driverContext),
hashBuilder));
hashBuilders.add(hashBuilder);
}
buildSideSetup.setDriversAndOperators(drivers, hashBuilders);
}
Aggregations