Use of com.facebook.presto.operator.HashAggregationOperator.HashAggregationOperatorFactory in project presto by prestodb.
From the class TestHashAggregationOperator, method testMemoryLimit:
@Test(dataProvider = "hashEnabled", expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded local memory limit of 10B")
public void testMemoryLimit(boolean hashEnabled) {
MetadataManager metadata = MetadataManager.createTestMetadataManager();
InternalAggregationFunction maxVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("max", AGGREGATE, parseTypeSignature(StandardTypes.VARCHAR), parseTypeSignature(StandardTypes.VARCHAR)));
List<Integer> hashChannels = Ints.asList(1);
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT);
List<Page> input = rowPagesBuilder.addSequencePage(10, 100, 0, 100, 0).addSequencePage(10, 100, 0, 200, 0).addSequencePage(10, 100, 0, 300, 0).build();
DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(10, Unit.BYTE)).addPipelineContext(0, true, true).addDriverContext();
HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(BIGINT), hashChannels, ImmutableList.of(), Step.SINGLE, ImmutableList.of(COUNT.bind(ImmutableList.of(0), Optional.empty()), LONG_SUM.bind(ImmutableList.of(3), Optional.empty()), LONG_AVERAGE.bind(ImmutableList.of(3), Optional.empty()), maxVarcharColumn.bind(ImmutableList.of(2), Optional.empty())), rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, new DataSize(16, MEGABYTE), joinCompiler);
toPages(operatorFactory, driverContext, input);
}
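The toPages call above comes from Presto's operator test utilities (OperatorAssertion). As a hedged sketch of what such a driver loop does, assuming only the standard Operator interface used elsewhere on this page (needsInput/addInput/getOutput/finish/isFinished); the real helper also waits on blocked operators and differs in detail:

// Hypothetical sketch of a toPages-style driver loop, not the actual
// OperatorAssertion.toPages (which also handles operator blocking).
static List<Page> toPagesSketch(OperatorFactory factory, DriverContext driverContext, List<Page> input)
        throws Exception
{
    List<Page> output = new ArrayList<>();
    try (Operator operator = factory.createOperator(driverContext)) {
        for (Page page : input) {
            // Collect any output produced while the operator digests earlier input
            while (!operator.needsInput()) {
                Page intermediate = operator.getOutput();
                if (intermediate != null) {
                    output.add(intermediate);
                }
            }
            operator.addInput(page);
        }
        // Signal end of input, then drain until the operator reports completion
        operator.finish();
        while (!operator.isFinished()) {
            Page page = operator.getOutput();
            if (page != null) {
                output.add(page);
            }
        }
    }
    return output;
}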
From the class TestHashAggregationOperator, method testMultiplePartialFlushes:
@Test(dataProvider = "hashEnabledAndMemoryLimitBeforeSpillValues")
public void testMultiplePartialFlushes(boolean hashEnabled, long memoryLimitBeforeSpill, long memoryLimitForMergeWithMemory) throws Exception {
List<Integer> hashChannels = Ints.asList(0);
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT);
List<Page> input = rowPagesBuilder.addSequencePage(500, 0).addSequencePage(500, 500).addSequencePage(500, 1000).addSequencePage(500, 1500).build();
HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(BIGINT), hashChannels, ImmutableList.of(), Step.PARTIAL, ImmutableList.of(LONG_SUM.bind(ImmutableList.of(0), Optional.empty())), rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, new DataSize(1, Unit.KILOBYTE), memoryLimitBeforeSpill > 0, succinctBytes(memoryLimitBeforeSpill), succinctBytes(memoryLimitForMergeWithMemory), spillerFactory, joinCompiler);
DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(4, Unit.KILOBYTE)).addPipelineContext(0, true, true).addDriverContext();
try (Operator operator = operatorFactory.createOperator(driverContext)) {
List<Page> expectedPages = rowPagesBuilder(BIGINT, BIGINT).addSequencePage(2000, 0, 0).build();
MaterializedResult expected = resultBuilder(driverContext.getSession(), BIGINT, BIGINT).pages(expectedPages).build();
Iterator<Page> inputIterator = input.iterator();
// Fill up the aggregation
while (operator.needsInput() && inputIterator.hasNext()) {
operator.addInput(inputIterator.next());
}
// Drain the output (partial flush)
List<Page> outputPages = new ArrayList<>();
while (true) {
Page output = operator.getOutput();
if (output == null) {
break;
}
outputPages.add(output);
}
// There should be some pages that were drained
assertTrue(!outputPages.isEmpty());
// The operator need input again since this was a partial flush
assertTrue(operator.needsInput());
// Now, drive the operator to completion
outputPages.addAll(toPages(operator, inputIterator));
MaterializedResult actual;
if (hashEnabled) {
// Drop the hashChannel for all pages
List<Page> actualPages = dropChannel(outputPages, ImmutableList.of(1));
List<Type> expectedTypes = without(operator.getTypes(), ImmutableList.of(1));
actual = toMaterializedResult(operator.getOperatorContext().getSession(), expectedTypes, actualPages);
} else {
actual = toMaterializedResult(operator.getOperatorContext().getSession(), operator.getTypes(), outputPages);
}
assertEquals(actual.getTypes(), expected.getTypes());
assertEqualsIgnoreOrder(actual.getMaterializedRows(), expected.getMaterializedRows());
}
}
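dropChannel and without above are small helpers defined alongside these tests. A hedged reconstruction, inferred only from their call sites here, so the real implementations may differ:

// Hedged reconstruction of the dropChannel/without helpers used above;
// inferred from their call sites, not copied from the Presto sources.
private static List<Page> dropChannel(List<Page> pages, List<Integer> channels)
{
    List<Page> actualPages = new ArrayList<>();
    for (Page page : pages) {
        int channel = 0;
        Block[] blocks = new Block[page.getChannelCount() - channels.size()];
        for (int i = 0; i < page.getChannelCount(); i++) {
            if (channels.contains(i)) {
                continue; // skip the dropped channel(s)
            }
            blocks[channel++] = page.getBlock(i);
        }
        actualPages.add(new Page(blocks));
    }
    return actualPages;
}

private static List<Type> without(List<Type> types, List<Integer> channels)
{
    List<Type> result = new ArrayList<>();
    for (int i = 0; i < types.size(); i++) {
        if (!channels.contains(i)) {
            result.add(types.get(i));
        }
    }
    return result;
}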
From the class TestHashAggregationOperator, method testMergeWithMemorySpill:
@Test
public void testMergeWithMemorySpill()
{
    List<Integer> hashChannels = Ints.asList(0);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(BIGINT);

    int smallPagesSpillThresholdSize = 150000;

    List<Page> input = rowPagesBuilder
            .addSequencePage(smallPagesSpillThresholdSize, 0)
            .addSequencePage(10, smallPagesSpillThresholdSize)
            .build();

    // expectedGroups of 1 plus an unconditional spill flag forces the spill-and-merge path
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(LONG_SUM.bind(ImmutableList.of(0), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            1,
            new DataSize(16, MEGABYTE),
            true,
            new DataSize(smallPagesSpillThresholdSize, Unit.BYTE),
            succinctBytes(Integer.MAX_VALUE),
            spillerFactory,
            joinCompiler);
    DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(1, Unit.KILOBYTE))
            .addPipelineContext(0, true, true)
            .addDriverContext();

    // Two output columns (group-by key and sum), so the result builder needs two BIGINT types
    MaterializedResult.Builder resultBuilder = resultBuilder(driverContext.getSession(), BIGINT, BIGINT);
    for (int i = 0; i < smallPagesSpillThresholdSize + 10; ++i) {
        resultBuilder.row((long) i, (long) i);
    }

    assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, resultBuilder.build(), false, Optional.of(hashChannels.size()));
}
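assertOperatorEqualsIgnoreOrder is another OperatorAssertion-style helper. A hedged sketch of the comparison it performs, reusing the hypothetical toPagesSketch above and omitting the hash-channel handling that its last two parameters control:

// Hedged sketch of an assertOperatorEqualsIgnoreOrder-style check; the real
// assertion in OperatorAssertion also drops the hash channel when asked to.
static void assertOperatorEqualsIgnoreOrderSketch(
        OperatorFactory factory,
        DriverContext driverContext,
        List<Page> input,
        MaterializedResult expected)
        throws Exception
{
    // Run the operator over all input and materialize the output pages
    List<Page> pages = toPagesSketch(factory, driverContext, input);
    MaterializedResult actual = toMaterializedResult(driverContext.getSession(), expected.getTypes(), pages);
    // Row order is not guaranteed by hash aggregation, so compare as multisets
    assertEqualsIgnoreOrder(actual.getMaterializedRows(), expected.getMaterializedRows());
}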
From the class TestHashAggregationOperator, method testHashBuilderResize:
@Test(dataProvider = "hashEnabledAndMemoryLimitBeforeSpillValues")
public void testHashBuilderResize(boolean hashEnabled, long memoryLimitBeforeSpill, long memoryLimitForMergeWithMemory) {
BlockBuilder builder = VARCHAR.createBlockBuilder(new BlockBuilderStatus(), 1, DEFAULT_MAX_BLOCK_SIZE_IN_BYTES);
// this must be larger than DEFAULT_MAX_BLOCK_SIZE, 64K
VARCHAR.writeSlice(builder, Slices.allocate(200_000));
builder.build();
List<Integer> hashChannels = Ints.asList(0);
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR);
List<Page> input = rowPagesBuilder.addSequencePage(10, 100).addBlocksPage(builder.build()).addSequencePage(10, 100).build();
DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(10, MEGABYTE)).addPipelineContext(0, true, true).addDriverContext();
HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(VARCHAR), hashChannels, ImmutableList.of(), Step.SINGLE, ImmutableList.of(COUNT.bind(ImmutableList.of(0), Optional.empty())), rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, new DataSize(16, MEGABYTE), memoryLimitBeforeSpill > 0, succinctBytes(memoryLimitBeforeSpill), succinctBytes(memoryLimitForMergeWithMemory), spillerFactory, joinCompiler);
toPages(operatorFactory, driverContext, input);
}
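Several of the tests on this page share the hashEnabledAndMemoryLimitBeforeSpillValues TestNG data provider, whose body is not shown here. A hypothetical sketch of its likely shape; the tuple layout matches the test signatures (hashEnabled, memoryLimitBeforeSpill, memoryLimitForMergeWithMemory), but the concrete values are assumptions:

// Hypothetical sketch of the data provider these tests reference; the
// concrete value matrix in TestHashAggregationOperator may differ.
@DataProvider(name = "hashEnabledAndMemoryLimitBeforeSpillValues")
public static Object[][] hashEnabledAndMemoryLimitBeforeSpillValues()
{
    return new Object[][] {
            {true, 0L, 0L},                          // hash channel on, spilling disabled
            {false, 0L, 0L},                         // hash channel off, spilling disabled
            {false, 8L, 0L},                         // spill almost immediately, merge from disk only
            {false, 8L, (long) Integer.MAX_VALUE}};  // spill, but allow merging with the in-memory portion
}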
From the class TestHashAggregationOperator, method testHashAggregation:
@Test(dataProvider = "hashEnabledAndMemoryLimitBeforeSpillValues")
public void testHashAggregation(boolean hashEnabled, long memoryLimitBeforeSpill, long memoryLimitForMergeWithMemory) throws Exception {
MetadataManager metadata = MetadataManager.createTestMetadataManager();
InternalAggregationFunction countVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("count", AGGREGATE, parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.VARCHAR)));
InternalAggregationFunction countBooleanColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("count", AGGREGATE, parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BOOLEAN)));
InternalAggregationFunction maxVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(new Signature("max", AGGREGATE, parseTypeSignature(StandardTypes.VARCHAR), parseTypeSignature(StandardTypes.VARCHAR)));
List<Integer> hashChannels = Ints.asList(1);
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN);
List<Page> input = rowPagesBuilder.addSequencePage(10, 100, 0, 100, 0, 500).addSequencePage(10, 100, 0, 200, 0, 500).addSequencePage(10, 100, 0, 300, 0, 500).build();
HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(VARCHAR), hashChannels, ImmutableList.of(), Step.SINGLE, ImmutableList.of(COUNT.bind(ImmutableList.of(0), Optional.empty()), LONG_SUM.bind(ImmutableList.of(3), Optional.empty()), LONG_AVERAGE.bind(ImmutableList.of(3), Optional.empty()), maxVarcharColumn.bind(ImmutableList.of(2), Optional.empty()), countVarcharColumn.bind(ImmutableList.of(0), Optional.empty()), countBooleanColumn.bind(ImmutableList.of(4), Optional.empty())), rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, new DataSize(16, MEGABYTE), memoryLimitBeforeSpill > 0, succinctBytes(memoryLimitBeforeSpill), succinctBytes(memoryLimitForMergeWithMemory), spillerFactory, joinCompiler);
MaterializedResult expected = resultBuilder(driverContext.getSession(), VARCHAR, BIGINT, BIGINT, DOUBLE, VARCHAR, BIGINT, BIGINT).row("0", 3L, 0L, 0.0, "300", 3L, 3L).row("1", 3L, 3L, 1.0, "301", 3L, 3L).row("2", 3L, 6L, 2.0, "302", 3L, 3L).row("3", 3L, 9L, 3.0, "303", 3L, 3L).row("4", 3L, 12L, 4.0, "304", 3L, 3L).row("5", 3L, 15L, 5.0, "305", 3L, 3L).row("6", 3L, 18L, 6.0, "306", 3L, 3L).row("7", 3L, 21L, 7.0, "307", 3L, 3L).row("8", 3L, 24L, 8.0, "308", 3L, 3L).row("9", 3L, 27L, 9.0, "309", 3L, 3L).build();
assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, hashEnabled, Optional.of(hashChannels.size()));
}
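All of the snippets above also reference class-level fixtures (executor, driverContext, spillerFactory, joinCompiler) and statically imported aggregate bindings (COUNT, LONG_SUM, LONG_AVERAGE) that are initialized outside the methods shown. A hedged sketch of the setup such a test class typically carries; the field names match the call sites above, but the initialization details are assumptions:

// Hedged sketch of the class-level fixtures the snippets assume;
// the real TestHashAggregationOperator setup may differ in detail.
private ExecutorService executor;
private DriverContext driverContext;
private SpillerFactory spillerFactory; // e.g. a dummy/test spiller factory
private JoinCompiler joinCompiler = new JoinCompiler();

@BeforeMethod
public void setUp()
{
    executor = newCachedThreadPool(daemonThreadsNamed("test-executor-%s"));
    // Default driver context; individual tests create their own when they
    // need a specific memory limit, as seen above.
    driverContext = createTaskContext(executor, TEST_SESSION)
            .addPipelineContext(0, true, true)
            .addDriverContext();
}

@AfterMethod
public void tearDown()
{
    executor.shutdownNow();
}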