Use of io.trino.sql.planner.NodePartitioningManager in project trino by trinodb.
The class TestLocalExchange, method setUp.
@BeforeMethod
public void setUp() {
    NodeScheduler nodeScheduler = new NodeScheduler(new UniformNodeSelectorFactory(
            new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true),
            new NodeTaskMap(new FinalizerService())));
    nodePartitioningManager = new NodePartitioningManager(nodeScheduler, new BlockTypeOperators(new TypeOperators()));
}
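For context, a hedged sketch of what the BlockTypeOperators dependency passed in above provides: per-type operators (hash code, equality, and so on) that the partitioning layer uses to route rows to partitions. The names below follow Trino's io.trino.type.BlockTypeOperators and io.trino.spi.type APIs, but exact signatures may differ between versions.

BlockTypeOperators blockTypeOperators = new BlockTypeOperators(new TypeOperators());
// Resolve a hash-code operator for BIGINT values stored in blocks
BlockPositionHashCode hashCodeOperator = blockTypeOperators.getHashCodeOperator(BIGINT);
BlockBuilder builder = BIGINT.createBlockBuilder(null, 1);
BIGINT.writeLong(builder, 42);
Block block = builder.build();
long hash = hashCodeOperator.hashCode(block, 0); // hash of the value at position 0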
Use of io.trino.sql.planner.NodePartitioningManager in project trino by trinodb.
The class TestHashJoinOperator, method testOuterJoinWithNullOnBothSidesAndFilterFunction.
@Test(dataProvider = "hashJoinTestValues")
public void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelBuild, boolean probeHashEnabled, boolean buildHashEnabled) {
    TaskContext taskContext = createTaskContext();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction(
            (leftPosition, leftPage, rightPosition, rightPage) ->
                    ImmutableSet.of("a", "c").contains(VARCHAR.getSlice(rightPage.getBlock(0), rightPosition).toStringAscii()));
    // build factory
    RowPagesBuilder buildPages = rowPagesBuilder(buildHashEnabled, Ints.asList(0), ImmutableList.of(VARCHAR))
            .row("a")
            .row((String) null)
            .row((String) null)
            .row("a")
            .row("b");
    BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.of(filterFunction), false, SINGLE_STREAM_SPILLER_FACTORY);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager();
    // probe factory
    List<Type> probeTypes = ImmutableList.of(VARCHAR);
    RowPagesBuilder probePages = rowPagesBuilder(probeHashEnabled, Ints.asList(0), probeTypes);
    List<Page> probeInput = probePages
            .row("a")
            .row("b")
            .row((String) null)
            .row("c")
            .build();
    OperatorFactory joinOperatorFactory = probeOuterJoinOperatorFactory(lookupSourceFactory, probePages, true);
    // build drivers and operators
    instantiateBuildDrivers(buildSideSetup, taskContext);
    buildLookupSource(executor, buildSideSetup);
    // expected
    MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash()))
            .row("a", "a")
            .row("a", "a")
            .row("b", null)
            .row(null, null)
            .row("c", null)
            .build();
    assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages));
}
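TestInternalJoinFilterFunction, used throughout these tests, is a small adapter that lets the filter be supplied as a lambda. A sketch of such an adapter, with the filter(...) signature inferred from the lambdas in these snippets (it may not match Trino's InternalJoinFilterFunction exactly):

private static class TestInternalJoinFilterFunction implements InternalJoinFilterFunction {
    public interface Lambda {
        boolean filter(int leftPosition, Page leftPage, int rightPosition, Page rightPage);
    }

    private final Lambda lambda;

    private TestInternalJoinFilterFunction(Lambda lambda) {
        this.lambda = lambda;
    }

    @Override
    public boolean filter(int leftPosition, Page leftPage, int rightPosition, Page rightPage) {
        // delegate straight to the supplied lambda
        return lambda.filter(leftPosition, leftPage, rightPosition, rightPage);
    }
}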
Use of io.trino.sql.planner.NodePartitioningManager in project trino by trinodb.
The class TestHashJoinOperator, method testUnwrapsLazyBlocks.
@Test
public void testUnwrapsLazyBlocks() {
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction(
            (leftPosition, leftPage, rightPosition, rightPage) -> {
                // force loading of probe block
                rightPage.getBlock(1).getLoadedBlock();
                return true;
            });
    RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)).addSequencePage(1, 0);
    BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), false, SINGLE_STREAM_SPILLER_FACTORY);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager();
    RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT, BIGINT));
    List<Page> probeInput = probePages.addSequencePage(1, 0, 0).build();
    probeInput = probeInput.stream()
            .map(page -> new Page(page.getBlock(0), new LazyBlock(1, () -> page.getBlock(1))))
            .collect(toImmutableList());
    OperatorFactory joinOperatorFactory = operatorFactories.innerJoin(
            0,
            new PlanNodeId("test"),
            lookupSourceFactory,
            false,
            false,
            true,
            probePages.getTypes(),
            Ints.asList(0),
            getHashChannelAsInt(probePages),
            Optional.empty(),
            OptionalInt.of(1),
            PARTITIONING_SPILLER_FACTORY,
            TYPE_OPERATOR_FACTORY);
    instantiateBuildDrivers(buildSideSetup, taskContext);
    buildLookupSource(executor, buildSideSetup);
    Operator operator = joinOperatorFactory.createOperator(driverContext);
    assertTrue(operator.needsInput());
    operator.addInput(probeInput.get(0));
    operator.finish();
    Page output = operator.getOutput();
    assertFalse(output.getBlock(1) instanceof LazyBlock);
}
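The rewrapping step above replaces the second probe channel with a LazyBlock whose loader returns the original block; the final assertion then proves the join emitted a fully loaded block. A condensed illustration of that wrapping, using the same constructor shape as the test:

// The loader lambda runs only when the block is first loaded, so checking the
// join output for LazyBlock verifies that the operator forced the load.
Block eager = page.getBlock(1);
Block lazy = new LazyBlock(eager.getPositionCount(), () -> eager);
Block loaded = lazy.getLoadedBlock(); // triggers the loader and returns the underlying block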
Use of io.trino.sql.planner.NodePartitioningManager in project trino by trinodb.
The class TestHashJoinOperator, method setUp.
@BeforeMethod
public void setUp() {
    // Before/AfterMethod is used here because the executor needs to be shut down
    // after every single test case to terminate any outstanding threads.
    // The line below is the same as newCachedThreadPool(daemonThreadsNamed(...)) except for the
    // RejectedExecutionHandler, which is set to DiscardPolicy (instead of the default AbortPolicy).
    // Otherwise, a large number of RejectedExecutionExceptions would flood the logs and fail the Travis build.
    executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, SECONDS, new SynchronousQueue<>(), daemonThreadsNamed("test-executor-%s"), new ThreadPoolExecutor.DiscardPolicy());
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed(getClass().getSimpleName() + "-scheduledExecutor-%s"));
    NodeScheduler nodeScheduler = new NodeScheduler(new UniformNodeSelectorFactory(
            new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true),
            new NodeTaskMap(new FinalizerService())));
    nodePartitioningManager = new NodePartitioningManager(nodeScheduler, new BlockTypeOperators(new TypeOperators()));
}
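The comments above imply a matching @AfterMethod that terminates the outstanding threads after each test. A minimal sketch, assuming the executor and scheduledExecutor fields initialized in setUp:

@AfterMethod(alwaysRun = true)
public void tearDown() {
    // shut down after every single test case to terminate outstanding threads, if any
    executor.shutdownNow();
    scheduledExecutor.shutdownNow();
}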
Use of io.trino.sql.planner.NodePartitioningManager in project trino by trinodb.
The class TestHashJoinOperator, method testYield.
@Test
public void testYield() {
    // create a filter function that yields for every probe match
    // and verify we yield #match times in total
    TaskContext taskContext = createTaskContext();
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    // force a yield for every match
    AtomicInteger filterFunctionCalls = new AtomicInteger();
    InternalJoinFilterFunction filterFunction = new TestInternalJoinFilterFunction(
            (leftPosition, leftPage, rightPosition, rightPage) -> {
                filterFunctionCalls.incrementAndGet();
                driverContext.getYieldSignal().forceYieldForTesting();
                return true;
            });
    // build with 40 entries
    int entries = 40;
    RowPagesBuilder buildPages = rowPagesBuilder(true, Ints.asList(0), ImmutableList.of(BIGINT)).addSequencePage(entries, 42);
    BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), false, SINGLE_STREAM_SPILLER_FACTORY);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager();
    // probe matching the above 40 entries
    RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT));
    List<Page> probeInput = probePages.addSequencePage(100, 0).build();
    OperatorFactory joinOperatorFactory = operatorFactories.innerJoin(
            0,
            new PlanNodeId("test"),
            lookupSourceFactory,
            false,
            false,
            true,
            probePages.getTypes(),
            Ints.asList(0),
            getHashChannelAsInt(probePages),
            Optional.empty(),
            OptionalInt.of(1),
            PARTITIONING_SPILLER_FACTORY,
            TYPE_OPERATOR_FACTORY);
    instantiateBuildDrivers(buildSideSetup, taskContext);
    buildLookupSource(executor, buildSideSetup);
    Operator operator = joinOperatorFactory.createOperator(driverContext);
    assertTrue(operator.needsInput());
    operator.addInput(probeInput.get(0));
    operator.finish();
    // we will yield 40 times due to filterFunction
    for (int i = 0; i < entries; i++) {
        driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
        filterFunctionCalls.set(0);
        assertNull(operator.getOutput());
        assertEquals(filterFunctionCalls.get(), 1, "Expected join to stop processing (yield) after calling filter function once");
        driverContext.getYieldSignal().reset();
    }
    // a delayed yield will not prevent the operator from producing a page now
    // (a yield won't be forced because the filter function won't be called anymore)
    driverContext.getYieldSignal().setWithDelay(5 * SECONDS.toNanos(1), driverContext.getYieldExecutor());
    // expect an output page to be produced within a few calls to getOutput(), e.g. to facilitate spill
    Page output = null;
    for (int i = 0; output == null && i < 5; i++) {
        output = operator.getOutput();
    }
    assertNotNull(output);
    driverContext.getYieldSignal().reset();
    // make sure we have all 40 entries
    assertEquals(output.getPositionCount(), entries);
}
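The loop works because the join operator checks the driver's yield signal between rows: the filter function forces the signal, so getOutput() returns null after processing exactly one match, and reset() lets the next call resume. A hypothetical sketch of that cooperation (hasMoreProbeRows, processOneRow, and buildOutputPage are illustrative helpers, not Trino APIs; isSet() follows Trino's DriverYieldSignal):

public Page getOutput() {
    while (hasMoreProbeRows()) { // hypothetical helper
        processOneRow(); // hypothetical helper; may invoke the filter function
        if (driverContext.getYieldSignal().isSet()) {
            return null; // yield the thread; resume on the next getOutput() call
        }
    }
    return buildOutputPage(); // hypothetical helper
}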