Use of io.prestosql.RowPagesBuilder in project hetu-core by openlookeng.
From the class TestRowNumberOperator, method testRowNumberPartitioned.
@Test(dataProvider = "hashEnabledValues")
public void testRowNumberPartitioned(boolean hashEnabled) {
DriverContext driverContext = getDriverContext();
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT, DOUBLE);
List<Page> input = rowPagesBuilder
        .row(1L, 0.3).row(2L, 0.2).row(3L, 0.1).row(3L, 0.19)
        .pageBreak()
        .row(1L, 0.4)
        .pageBreak()
        .row(1L, 0.5).row(1L, 0.6).row(2L, 0.7).row(2L, 0.8).row(2L, 0.9)
        .build();
RowNumberOperator.RowNumberOperatorFactory operatorFactory = new RowNumberOperator.RowNumberOperatorFactory(
        0, new PlanNodeId("test"),
        ImmutableList.of(BIGINT, DOUBLE), Ints.asList(1, 0), Ints.asList(0), ImmutableList.of(BIGINT),
        Optional.of(10), rowPagesBuilder.getHashChannel(), 10, joinCompiler);
MaterializedResult expectedPartition1 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT)
        .row(0.3, 1L).row(0.4, 1L).row(0.5, 1L).row(0.6, 1L)
        .build();
MaterializedResult expectedPartition2 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT)
        .row(0.2, 2L).row(0.7, 2L).row(0.8, 2L).row(0.9, 2L)
        .build();
MaterializedResult expectedPartition3 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT)
        .row(0.1, 3L).row(0.19, 3L)
        .build();
List<Page> pages = toPages(operatorFactory, driverContext, input);
Block rowNumberColumn = getRowNumberColumn(pages);
assertEquals(rowNumberColumn.getPositionCount(), 10);
pages = stripRowNumberColumn(pages);
MaterializedResult actual = toMaterializedResult(driverContext.getSession(), ImmutableList.of(DOUBLE, BIGINT), pages);
ImmutableSet<?> actualSet = ImmutableSet.copyOf(actual.getMaterializedRows());
ImmutableSet<?> expectedPartition1Set = ImmutableSet.copyOf(expectedPartition1.getMaterializedRows());
ImmutableSet<?> expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows());
ImmutableSet<?> expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows());
assertEquals(Sets.intersection(expectedPartition1Set, actualSet).size(), 4);
assertEquals(Sets.intersection(expectedPartition2Set, actualSet).size(), 4);
assertEquals(Sets.intersection(expectedPartition3Set, actualSet).size(), 2);
}
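Note on the builder pattern above: each row(...) call appends one position (here a BIGINT partition key and a DOUBLE value) to the page currently under construction, pageBreak() closes that page and starts a new one, and build() returns the accumulated List<Page>. A minimal sketch of just that pattern, assuming only the rowPagesBuilder static factory and the type constants already imported by the test (sketchBuilder and sketchPages are illustrative names, not from the test):
// Two pages of (BIGINT, DOUBLE) rows: two rows in the first page, one in the second.
RowPagesBuilder sketchBuilder = rowPagesBuilder(BIGINT, DOUBLE);
List<Page> sketchPages = sketchBuilder
        .row(1L, 0.5)     // position 0 of page 0
        .row(2L, 0.25)    // position 1 of page 0
        .pageBreak()      // close page 0, start page 1
        .row(3L, 0.75)    // position 0 of page 1
        .build();         // returns two Page objects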
Use of io.prestosql.RowPagesBuilder in project hetu-core by openlookeng.
From the class TestTopNRankingNumberOperator, method testPartitionedSnapshot.
@Test
public void testPartitionedSnapshot() {
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(true, Ints.asList(0), BIGINT, DOUBLE);
List<Page> input = rowPagesBuilder
        .row(1L, 0.1).row(2L, 0.1).row(3L, 0.1).row(3L, 0.1)
        .pageBreak()
        .row(1L, 0.2)
        .pageBreak()
        .row(1L, 0.2).row(1L, 0.2).row(2L, 0.3).row(2L, 0.4)
        .pageBreak()
        .row(2L, 0.3)
        .build();
// row_number() over(partition by 0 order by 1) top 3
TopNRankingNumberOperatorFactory operatorFactory = new TopNRankingNumberOperatorFactory(
        0, new PlanNodeId("test"),
        ImmutableList.of(BIGINT, DOUBLE), Ints.asList(1, 0), Ints.asList(0), ImmutableList.of(BIGINT),
        Ints.asList(1), ImmutableList.of(SortOrder.ASC_NULLS_LAST), 3, false,
        Optional.empty(), 10, joinCompiler, Optional.of(RankingFunction.ROW_NUMBER));
MaterializedResult rowNumberExpected = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT, BIGINT)
        .row(0.1, 1L, 1L).row(0.2, 1L, 2L).row(0.2, 1L, 3L)
        .row(0.1, 2L, 1L).row(0.3, 2L, 2L).row(0.3, 2L, 3L)
        .row(0.1, 3L, 1L).row(0.1, 3L, 2L)
        .build();
assertOperatorEqualsWithStateComparison(operatorFactory, driverContext, input, rowNumberExpected, createExpectedMappingRestore());
// rank() over(partition by 0 order by 1) top 3
operatorFactory = new TopNRankingNumberOperatorFactory(
        1, new PlanNodeId("test"),
        ImmutableList.of(BIGINT, DOUBLE), Ints.asList(1, 0), Ints.asList(0), ImmutableList.of(BIGINT),
        Ints.asList(1), ImmutableList.of(SortOrder.ASC_NULLS_LAST), 3, false,
        Optional.empty(), 10, joinCompiler, Optional.of(RankingFunction.RANK));
MaterializedResult rankNumberExpected = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT, BIGINT)
        .row(0.1, 1L, 1L).row(0.2, 1L, 2L).row(0.2, 1L, 2L).row(0.2, 1L, 2L)
        .row(0.1, 2L, 1L).row(0.3, 2L, 2L).row(0.3, 2L, 2L)
        .row(0.1, 3L, 1L).row(0.1, 3L, 1L)
        .build();
assertOperatorEqualsWithStateComparison(operatorFactory, driverContext, input, rankNumberExpected, createExpectedMappingRestore());
// dense_rank() over(partition by 0 order by 1) top 3
operatorFactory = new TopNRankingNumberOperatorFactory(
        2, new PlanNodeId("test"),
        ImmutableList.of(BIGINT, DOUBLE), Ints.asList(1, 0), Ints.asList(0), ImmutableList.of(BIGINT),
        Ints.asList(1), ImmutableList.of(SortOrder.ASC_NULLS_LAST), 3, false,
        Optional.empty(), 10, joinCompiler, Optional.of(RankingFunction.DENSE_RANK));
MaterializedResult denseRankNumberExpected = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT, BIGINT)
        .row(0.1, 1L, 1L).row(0.2, 1L, 2L).row(0.2, 1L, 2L).row(0.2, 1L, 2L)
        .row(0.1, 2L, 1L).row(0.3, 2L, 2L).row(0.3, 2L, 2L).row(0.4, 2L, 3L)
        .row(0.1, 3L, 1L).row(0.1, 3L, 1L)
        .build();
assertOperatorEqualsWithStateComparison(operatorFactory, driverContext, input, denseRankNumberExpected, createExpectedMappingRestoreDenseRank());
}
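The three expected results above differ only in how ties in the ordering column are numbered. A self-contained sketch (plain Java, not hetu-core code) of the three numbering schemes applied to the partition-1 ordering values 0.1, 0.2, 0.2, 0.2 from the input above:
// row_number() always increments; rank() repeats on ties and then skips;
// dense_rank() repeats on ties without skipping.
double[] orderingValues = {0.1, 0.2, 0.2, 0.2};
int rowNumber = 0;
int rank = 0;
int denseRank = 0;
double previous = Double.NaN;
for (double value : orderingValues) {
    rowNumber++;
    if (value != previous) {
        rank = rowNumber;    // jumps past the tied group
        denseRank++;         // increments by exactly one per distinct value
        previous = value;
    }
    System.out.printf("value=%.1f row_number=%d rank=%d dense_rank=%d%n", value, rowNumber, rank, denseRank);
}
// Prints 0.1 -> 1/1/1, then 0.2 -> 2/2/2, 3/2/2, 4/2/2, which matches the rows
// each variant keeps for partition 1 once the top-3 cutoff is applied.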
Use of io.prestosql.RowPagesBuilder in project hetu-core by openlookeng.
From the class TestGenericPartitioningSpiller, method testFileSpiller.
@Test
public void testFileSpiller() throws Exception {
try (PartitioningSpiller spiller = factory.create(TYPES, new FourFixedPartitionsPartitionFunction(0), mockSpillContext(), mockMemoryContext(scheduledExecutor))) {
RowPagesBuilder builder = RowPagesBuilder.rowPagesBuilder(TYPES);
builder.addSequencePage(10, SECOND_PARTITION_START, 5, 10, 15);
builder.addSequencePage(10, FIRST_PARTITION_START, -5, 0, 5);
List<Page> firstSpill = builder.build();
builder = RowPagesBuilder.rowPagesBuilder(TYPES);
builder.addSequencePage(10, THIRD_PARTITION_START, 15, 20, 25);
builder.addSequencePage(10, FOURTH_PARTITION_START, 25, 30, 35);
List<Page> secondSpill = builder.build();
IntPredicate spillPartitionMask = ImmutableSet.of(1, 2)::contains;
PartitioningSpillResult result = spiller.partitionAndSpill(firstSpill.get(0), spillPartitionMask);
result.getSpillingFuture().get();
assertEquals(result.getRetained().getPositionCount(), 0);
result = spiller.partitionAndSpill(firstSpill.get(1), spillPartitionMask);
result.getSpillingFuture().get();
assertEquals(result.getRetained().getPositionCount(), 10);
result = spiller.partitionAndSpill(secondSpill.get(0), spillPartitionMask);
result.getSpillingFuture().get();
assertEquals(result.getRetained().getPositionCount(), 0);
result = spiller.partitionAndSpill(secondSpill.get(1), spillPartitionMask);
result.getSpillingFuture().get();
assertEquals(result.getRetained().getPositionCount(), 10);
builder = RowPagesBuilder.rowPagesBuilder(TYPES);
builder.addSequencePage(10, SECOND_PARTITION_START, 5, 10, 15);
List<Page> secondPartition = builder.build();
builder = RowPagesBuilder.rowPagesBuilder(TYPES);
builder.addSequencePage(10, THIRD_PARTITION_START, 15, 20, 25);
List<Page> thirdPartition = builder.build();
assertSpilledPages(TYPES, spiller, ImmutableList.of(ImmutableList.of(), secondPartition, thirdPartition, ImmutableList.of()));
}
}
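The spillPartitionMask above decides, per partition id, whether a row is written to the spill files (true) or kept in the page returned by getRetained() (false); that is why the pages whose rows fall in partitions 1 and 2 retain zero positions while the others retain all 10. A minimal sketch of the mask construction, assuming only java.util.function.IntPredicate and Guava's ImmutableSet:
// Rows whose partition id is in {1, 2} are spilled; everything else stays in
// the retained page returned by partitionAndSpill(...).
IntPredicate spillPartitionMask = ImmutableSet.of(1, 2)::contains;
System.out.println(spillPartitionMask.test(1));   // true  -> row is spilled
System.out.println(spillPartitionMask.test(0));   // false -> row stays in getRetained()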
Use of io.prestosql.RowPagesBuilder in project hetu-core by openlookeng.
From the class TestHashAggregationOperator, method testMergeWithMemorySpill.
@Test
public void testMergeWithMemorySpill() {
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(BIGINT);
int smallPagesSpillThresholdSize = 150000;
List<Page> input = rowPagesBuilder
        .addSequencePage(smallPagesSpillThresholdSize, 0)
        .addSequencePage(10, smallPagesSpillThresholdSize)
        .build();
HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
        0, new PlanNodeId("test"),
        ImmutableList.of(BIGINT), ImmutableList.of(0), ImmutableList.of(), Step.SINGLE, false,
        ImmutableList.of(LONG_SUM.bind(ImmutableList.of(0), Optional.empty())),
        rowPagesBuilder.getHashChannel(), Optional.empty(), 1,
        Optional.of(new DataSize(16, MEGABYTE)), true,
        new DataSize(smallPagesSpillThresholdSize, Unit.BYTE), succinctBytes(Integer.MAX_VALUE),
        spillerFactory, joinCompiler, false);
DriverContext driverContext = createDriverContext(smallPagesSpillThresholdSize);
MaterializedResult.Builder resultBuilder = resultBuilder(driverContext.getSession(), BIGINT, BIGINT);
for (int i = 0; i < smallPagesSpillThresholdSize + 10; ++i) {
resultBuilder.row((long) i, (long) i);
}
assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, resultBuilder.build());
}
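For readers unfamiliar with addSequencePage: under the usual RowPagesBuilder sequence-page semantics, each call appends one page of the given length in which every channel counts up from its initial value, so the input above holds 150000 + 10 distinct BIGINT keys, each appearing exactly once; summing a single-row group returns the key itself, which is why the expected result pairs every i with itself. A small sketch of that shape (sequenceBuilder and sequencePages are illustrative names):
// One page of three BIGINT rows whose single channel starts at 100.
RowPagesBuilder sequenceBuilder = rowPagesBuilder(BIGINT);
List<Page> sequencePages = sequenceBuilder
        .addSequencePage(3, 100)   // rows: (100), (101), (102)
        .build();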
Use of io.prestosql.RowPagesBuilder in project hetu-core by openlookeng.
From the class TestHashAggregationOperator, method testHashBuilderResizeLimit.
@Test(dataProvider = "hashEnabled", expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded per-node user memory limit of 3MB.*")
public void testHashBuilderResizeLimit(boolean hashEnabled) {
BlockBuilder builder = VARCHAR.createBlockBuilder(null, 1, MAX_BLOCK_SIZE_IN_BYTES);
// the slice written below must be larger than MAX_BLOCK_SIZE_IN_BYTES (64KB)
VARCHAR.writeSlice(builder, Slices.allocate(5_000_000));
builder.build();
List<Integer> hashChannels = Ints.asList(0);
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR);
List<Page> input = rowPagesBuilder
        .addSequencePage(10, 100)
        .addBlocksPage(builder.build())
        .addSequencePage(10, 100)
        .build();
DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(3, MEGABYTE))
        .addPipelineContext(0, true, true, false)
        .addDriverContext();
HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
        0, new PlanNodeId("test"),
        ImmutableList.of(VARCHAR), hashChannels, ImmutableList.of(), Step.SINGLE,
        ImmutableList.of(COUNT.bind(ImmutableList.of(0), Optional.empty())),
        rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000,
        Optional.of(new DataSize(16, MEGABYTE)), joinCompiler, false);
toPages(operatorFactory, driverContext, input);
}
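The test passes only if toPages(...) fails: the single ~5 MB VARCHAR value cannot be held once the aggregation's hash builder has to grow its backing block, presumably tripping the 3 MB per-node user memory limit configured on the task context. The expectedExceptions attribute on @Test performs the assertion; an equivalent inline check, shown purely as an illustrative sketch (assuming TestNG's fail is statically imported, as assertEquals is elsewhere in the class), would be:
try {
    toPages(operatorFactory, driverContext, input);
    fail("expected the 3MB per-node user memory limit to be exceeded");
}
catch (ExceededMemoryLimitException expected) {
    // the hash builder could not resize within the 3 MB limit
}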