Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.
From the class TestHashAggregationOperator, method testHashAggregationMemoryReservation:
@Test(dataProvider = "hashEnabledAndMemoryLimitForMergeValues")
public void testHashAggregationMemoryReservation(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory)
{
    TestingAggregationFunction arrayAggColumn = FUNCTION_RESOLUTION.getAggregateFunction(QualifiedName.of("array_agg"), fromTypes(BIGINT));
    List<Integer> hashChannels = Ints.asList(1); // group on the second BIGINT column
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT, BIGINT);
    List<Page> input = rowPagesBuilder
            .addSequencePage(10, 100, 0)
            .addSequencePage(10, 200, 0)
            .addSequencePage(10, 300, 0)
            .build();
    DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.of(11, Unit.MEGABYTE))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0, new PlanNodeId("test"), ImmutableList.of(BIGINT), hashChannels, ImmutableList.of(), SINGLE, true,
            ImmutableList.of(arrayAggColumn.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())),
            rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)),
            spillEnabled, succinctBytes(memoryLimitForMerge), succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory, joinCompiler, blockTypeOperators, Optional.empty());
    Operator operator = operatorFactory.createOperator(driverContext);
    toPages(operator, input.iterator(), revokeMemoryWhenAddingPages);
    // TODO (https://github.com/trinodb/trino/issues/10596): it should be 0, since the operator is finished
    assertEquals(getOnlyElement(operator.getOperatorContext().getNestedOperatorStats()).getUserMemoryReservation().toBytes(),
            spillEnabled && revokeMemoryWhenAddingPages ? 5_322_192 : 0);
    assertEquals(getOnlyElement(operator.getOperatorContext().getNestedOperatorStats()).getRevocableMemoryReservation().toBytes(), 0);
}
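For readers who have not seen RowPagesBuilder before: each addSequencePage(positionCount, initialValue...) call above appears to emit one page whose columns count upward from the given start values, so the input here is three ten-row pages. The helper below is hypothetical and only illustrates that assumed data shape; it is not part of the original test.

// Hypothetical illustration of the page that addSequencePage(10, 100, 0) is assumed to produce:
// column 0 runs 100..109 (fed to array_agg over channel 0), column 1 runs 0..9 (the grouping channel, hashChannels = [1]).
private static Page buildFirstSequencePage()
{
    BlockBuilder column0 = BIGINT.createBlockBuilder(null, 10);
    BlockBuilder column1 = BIGINT.createBlockBuilder(null, 10);
    for (int i = 0; i < 10; i++) {
        BIGINT.writeLong(column0, 100 + i);
        BIGINT.writeLong(column1, i);
    }
    return new Page(column0.build(), column1.build());
}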
Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.
From the class TestHashAggregationOperator, method testHashAggregation:
@Test(dataProvider = "hashEnabledAndMemoryLimitForMergeValues")
public void testHashAggregation(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory)
{
    // make the operator produce multiple pages during the finish phase
    int numberOfRows = 40_000;
    TestingAggregationFunction countVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction(QualifiedName.of("count"), fromTypes(VARCHAR));
    TestingAggregationFunction countBooleanColumn = FUNCTION_RESOLUTION.getAggregateFunction(QualifiedName.of("count"), fromTypes(BOOLEAN));
    TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction(QualifiedName.of("max"), fromTypes(VARCHAR));
    List<Integer> hashChannels = Ints.asList(1); // group on the second VARCHAR column
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN);
    List<Page> input = rowPagesBuilder
            .addSequencePage(numberOfRows, 100, 0, 100_000, 0, 500)
            .addSequencePage(numberOfRows, 100, 0, 200_000, 0, 500)
            .addSequencePage(numberOfRows, 100, 0, 300_000, 0, 500)
            .build();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0, new PlanNodeId("test"), ImmutableList.of(VARCHAR), hashChannels, ImmutableList.of(), SINGLE, false,
            ImmutableList.of(
                    COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()),
                    LONG_SUM.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()),
                    LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()),
                    maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty()),
                    countVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()),
                    countBooleanColumn.createAggregatorFactory(SINGLE, ImmutableList.of(4), OptionalInt.empty())),
            rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)),
            spillEnabled, succinctBytes(memoryLimitForMerge), succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory, joinCompiler, blockTypeOperators, Optional.empty());
    DriverContext driverContext = createDriverContext(memoryLimitForMerge);
    MaterializedResult.Builder expectedBuilder = resultBuilder(driverContext.getSession(), VARCHAR, BIGINT, BIGINT, DOUBLE, VARCHAR, BIGINT, BIGINT);
    for (int i = 0; i < numberOfRows; ++i) {
        // every key occurs once per input page, i.e. three times in total
        expectedBuilder.row(Integer.toString(i), 3L, 3L * i, (double) i, Integer.toString(300_000 + i), 3L, 3L);
    }
    MaterializedResult expected = expectedBuilder.build();
    List<Page> pages = toPages(operatorFactory, driverContext, input, revokeMemoryWhenAddingPages);
    assertGreaterThan(pages.size(), 1, "Expected more than one output page");
    assertPagesEqualIgnoreOrder(driverContext, pages, expected, hashEnabled, Optional.of(hashChannels.size()));
    assertTrue(spillEnabled == (spillerFactory.getSpillsCount() > 0),
            format("Spill state mismatch. Expected spill: %s, spill count: %s", spillEnabled, spillerFactory.getSpillsCount()));
}
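Two notes on the test above. First, the expected rows follow directly from the input: the three sequence pages repeat the same values in the grouping channel (column 1), so every key occurs exactly three times, which yields the 3L counts, the 3L * i sum, the average of i, and a max over column 2 taken from the page seeded at 300_000. Second, the createDriverContext(memoryLimitForMerge) helper is not included in this excerpt; judging from the inline task-context construction in the other tests here, it presumably looks roughly like the following sketch (the actual helper in TestHashAggregationOperator may differ):

// Sketch only, inferred from the inline pattern used elsewhere in this class.
private DriverContext createDriverContext(long memoryLimit)
{
    return createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.ofBytes(memoryLimit))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
}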
Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.
From the class TestHashAggregationOperator, method testMultiSliceAggregationOutput:
@Test(dataProvider = "hashEnabled")
public void testMultiSliceAggregationOutput(boolean hashEnabled)
{
    // estimate the number of entries required to create 1.5 pages of results
    // see InMemoryHashAggregationBuilder.buildTypes()
    int fixedWidthSize = SIZE_OF_LONG + SIZE_OF_LONG + // used by BigintGroupByHash, see BigintGroupByHash.TYPES_WITH_RAW_HASH
            SIZE_OF_LONG + SIZE_OF_DOUBLE; // used by the COUNT and LONG_AVERAGE aggregators
    int multiSlicePositionCount = (int) (1.5 * PageBuilderStatus.DEFAULT_MAX_PAGE_SIZE_IN_BYTES / fixedWidthSize);
    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT, BIGINT);
    List<Page> input = rowPagesBuilder.addSequencePage(multiSlicePositionCount, 0, 0).build();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0, new PlanNodeId("test"), ImmutableList.of(BIGINT), hashChannels, ImmutableList.of(), SINGLE,
            ImmutableList.of(
                    COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()),
                    LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(1), OptionalInt.empty())),
            rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)),
            joinCompiler, blockTypeOperators, Optional.empty());
    assertEquals(toPages(operatorFactory, createDriverContext(), input).size(), 2);
}
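Worked numbers for the sizing above: with SIZE_OF_LONG and SIZE_OF_DOUBLE both 8 bytes, fixedWidthSize is 32 bytes per group. Assuming PageBuilderStatus.DEFAULT_MAX_PAGE_SIZE_IN_BYTES is 1 MB (1_048_576 bytes), multiSlicePositionCount comes out to 49_152 distinct groups, roughly 1.5 pages' worth of fixed-width output, which is why the assertion expects the operator to emit exactly two pages.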
Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.
From the class TestHashAggregationOperator, method testMemoryLimit:
@Test(dataProvider = "hashEnabled", expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded per-node memory limit of 10B.*")
public void testMemoryLimit(boolean hashEnabled)
{
    TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction(QualifiedName.of("max"), fromTypes(VARCHAR));
    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT);
    List<Page> input = rowPagesBuilder.addSequencePage(10, 100, 0, 100, 0).addSequencePage(10, 100, 0, 200, 0).addSequencePage(10, 100, 0, 300, 0).build();
    // task context capped at 10 bytes, so the aggregation is guaranteed to exceed the limit
    DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.ofBytes(10))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0, new PlanNodeId("test"), ImmutableList.of(BIGINT), hashChannels, ImmutableList.of(), SINGLE,
            ImmutableList.of(
                    COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()),
                    LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()),
                    LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()),
                    maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty())),
            rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)),
            joinCompiler, blockTypeOperators, Optional.empty());
    toPages(operatorFactory, driverContext, input);
}
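Because the task context is created with a limit of only 10 bytes, the first pages added to the group-by hash push user memory past the limit, and the expectedExceptions attributes on the @Test annotation assert the resulting ExceededMemoryLimitException and its per-node message. For illustration only, an equivalent check written with AssertJ (not necessarily how the project phrases this particular test) would be:

// Hypothetical AssertJ formulation of the same expectation, shown only as an alternative to @Test(expectedExceptions = ...).
assertThatThrownBy(() -> toPages(operatorFactory, driverContext, input))
        .isInstanceOf(ExceededMemoryLimitException.class)
        .hasMessageMatching("Query exceeded per-node memory limit of 10B.*");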
Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.
From the class TestHashAggregationOperator, method testHashBuilderResize:
@Test(dataProvider = "hashEnabledAndMemoryLimitForMergeValues")
public void testHashBuilderResize(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory)
{
    BlockBuilder builder = VARCHAR.createBlockBuilder(null, 1, MAX_BLOCK_SIZE_IN_BYTES);
    VARCHAR.writeSlice(builder, Slices.allocate(200_000)); // this must be larger than MAX_BLOCK_SIZE_IN_BYTES, 64K
    builder.build();
    List<Integer> hashChannels = Ints.asList(0);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR);
    List<Page> input = rowPagesBuilder
            .addSequencePage(10, 100)
            .addBlocksPage(builder.build())
            .addSequencePage(10, 100)
            .build();
    DriverContext driverContext = createDriverContext(memoryLimitForMerge);
    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0, new PlanNodeId("test"), ImmutableList.of(VARCHAR), hashChannels, ImmutableList.of(), SINGLE, false,
            ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())),
            rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)),
            spillEnabled, succinctBytes(memoryLimitForMerge), succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory, joinCompiler, blockTypeOperators, Optional.empty());
    toPages(operatorFactory, driverContext, input, revokeMemoryWhenAddingPages);
}
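The 200_000-byte slice written into the VARCHAR block is deliberately larger than MAX_BLOCK_SIZE_IN_BYTES (64 KB, per the inline comment), so appending it forces the variable-width block inside the aggregation builder to grow past its normal size and exercises the resize path; the test simply runs toPages and passes as long as that completes without error under the configured memory and spill settings.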