Use of io.airlift.units.DataSize in project presto by prestodb.
From class TestTopNOperator, method testSingleFieldKey.
@Test
public void testSingleFieldKey()
        throws Exception
{
    List<Page> input = rowPagesBuilder(BIGINT, DOUBLE)
            .row(1L, 0.1)
            .row(2L, 0.2)
            .pageBreak()
            .row(-1L, -0.1)
            .row(4L, 0.4)
            .pageBreak()
            .row(5L, 0.5)
            .row(4L, 0.41)
            .row(6L, 0.6)
            .pageBreak()
            .build();

    TopNOperatorFactory operatorFactory = new TopNOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT, DOUBLE),
            2,
            ImmutableList.of(0),
            ImmutableList.of(DESC_NULLS_LAST),
            false,
            new DataSize(16, MEGABYTE));

    MaterializedResult expected = resultBuilder(driverContext.getSession(), BIGINT, DOUBLE)
            .row(6L, 0.6)
            .row(5L, 0.5)
            .build();

    assertOperatorEquals(operatorFactory, driverContext, input, expected);
}
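The factory's last argument is a DataSize of 16 megabytes. A minimal sketch of how such a value behaves, assuming only the io.airlift.units.DataSize API already used in these tests (the (value, Unit) constructor and toBytes()):

import io.airlift.units.DataSize;

import static io.airlift.units.DataSize.Unit.MEGABYTE;

public class DataSizeSketch
{
    public static void main(String[] args)
    {
        // the same value TopNOperatorFactory receives above
        DataSize size = new DataSize(16, MEGABYTE);
        System.out.println(size.toBytes()); // 16777216 (16 * 1024 * 1024)
        System.out.println(size);           // "16MB"
    }
}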
Use of io.airlift.units.DataSize in project presto by prestodb.
From class TestTopNOperator, method testMultiFieldKey.
@Test
public void testMultiFieldKey()
        throws Exception
{
    List<Page> input = rowPagesBuilder(VARCHAR, BIGINT)
            .row("a", 1L)
            .row("b", 2L)
            .pageBreak()
            .row("f", 3L)
            .row("a", 4L)
            .pageBreak()
            .row("d", 5L)
            .row("d", 7L)
            .row("e", 6L)
            .build();

    TopNOperatorFactory operatorFactory = new TopNOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR, BIGINT),
            3,
            ImmutableList.of(0, 1),
            ImmutableList.of(DESC_NULLS_LAST, DESC_NULLS_LAST),
            false,
            new DataSize(16, MEGABYTE));

    MaterializedResult expected = MaterializedResult.resultBuilder(driverContext.getSession(), VARCHAR, BIGINT)
            .row("f", 3L)
            .row("e", 6L)
            .row("d", 7L)
            .build();

    assertOperatorEquals(operatorFactory, driverContext, input, expected);
}
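The expected rows follow from sorting both channels descending: "f" beats every other first key, and between the two "d" rows the larger BIGINT wins. A hedged, illustrative sketch of that ordering rule with a plain Comparator (this is not the operator's internal code, just the same ordering applied to the test data):

import java.util.Comparator;
import java.util.List;
import java.util.Map;

public class MultiKeyOrderSketch
{
    public static void main(String[] args)
    {
        List<Map.Entry<String, Long>> rows = List.of(
                Map.entry("a", 1L), Map.entry("b", 2L), Map.entry("f", 3L),
                Map.entry("a", 4L), Map.entry("d", 5L), Map.entry("d", 7L), Map.entry("e", 6L));

        // DESC_NULLS_LAST on both keys; the null handling is moot since the input has no nulls
        Comparator<Map.Entry<String, Long>> order = Comparator
                .comparing((Map.Entry<String, Long> e) -> e.getKey(), Comparator.reverseOrder())
                .thenComparing((Map.Entry<String, Long> e) -> e.getValue(), Comparator.reverseOrder());

        rows.stream().sorted(order).limit(3).forEach(System.out::println);
        // prints f=3, e=6, d=7, matching the expected MaterializedResult rows
    }
}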
Use of io.airlift.units.DataSize in project presto by prestodb.
From class TestHashAggregationOperator, method testHashBuilderResize.
@Test(dataProvider = "hashEnabledAndMemoryLimitBeforeSpillValues")
public void testHashBuilderResize(boolean hashEnabled, long memoryLimitBeforeSpill, long memoryLimitForMergeWithMemory)
{
    BlockBuilder builder = VARCHAR.createBlockBuilder(new BlockBuilderStatus(), 1, DEFAULT_MAX_BLOCK_SIZE_IN_BYTES);
    // this allocation must be larger than DEFAULT_MAX_BLOCK_SIZE (64KB) to force the hash builder to resize
    VARCHAR.writeSlice(builder, Slices.allocate(200_000));
    builder.build();

    List<Integer> hashChannels = Ints.asList(0);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR);
    List<Page> input = rowPagesBuilder
            .addSequencePage(10, 100)
            .addBlocksPage(builder.build())
            .addSequencePage(10, 100)
            .build();

    DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(10, MEGABYTE))
            .addPipelineContext(0, true, true)
            .addDriverContext();

    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(COUNT.bind(ImmutableList.of(0), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            100_000,
            new DataSize(16, MEGABYTE),
            memoryLimitBeforeSpill > 0,
            succinctBytes(memoryLimitBeforeSpill),
            succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory,
            joinCompiler);

    toPages(operatorFactory, driverContext, input);
}
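succinctBytes wraps a raw byte count in a DataSize expressed in the most compact unit, which is why the spill limits can be passed through from the data provider as plain longs. A small sketch, again assuming the io.airlift.units API used above:

import static io.airlift.units.DataSize.succinctBytes;

public class SuccinctBytesSketch
{
    public static void main(String[] args)
    {
        System.out.println(succinctBytes(16_777_216));           // "16MB", the most compact unit
        System.out.println(succinctBytes(16_777_216).toBytes()); // 16777216, the value is preserved
    }
}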
Use of io.airlift.units.DataSize in project presto by prestodb.
From class TestHashAggregationOperator, method testHashAggregation.
@Test(dataProvider = "hashEnabledAndMemoryLimitBeforeSpillValues")
public void testHashAggregation(boolean hashEnabled, long memoryLimitBeforeSpill, long memoryLimitForMergeWithMemory)
        throws Exception
{
    MetadataManager metadata = MetadataManager.createTestMetadataManager();
    InternalAggregationFunction countVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(
            new Signature("count", AGGREGATE, parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.VARCHAR)));
    InternalAggregationFunction countBooleanColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(
            new Signature("count", AGGREGATE, parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BOOLEAN)));
    InternalAggregationFunction maxVarcharColumn = metadata.getFunctionRegistry().getAggregateFunctionImplementation(
            new Signature("max", AGGREGATE, parseTypeSignature(StandardTypes.VARCHAR), parseTypeSignature(StandardTypes.VARCHAR)));

    // group by channel 1; each sequence page contributes one row to every group "0" through "9"
    List<Integer> hashChannels = Ints.asList(1);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN);
    List<Page> input = rowPagesBuilder
            .addSequencePage(10, 100, 0, 100, 0, 500)
            .addSequencePage(10, 100, 0, 200, 0, 500)
            .addSequencePage(10, 100, 0, 300, 0, 500)
            .build();

    HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(VARCHAR),
            hashChannels,
            ImmutableList.of(),
            Step.SINGLE,
            ImmutableList.of(
                    COUNT.bind(ImmutableList.of(0), Optional.empty()),
                    LONG_SUM.bind(ImmutableList.of(3), Optional.empty()),
                    LONG_AVERAGE.bind(ImmutableList.of(3), Optional.empty()),
                    maxVarcharColumn.bind(ImmutableList.of(2), Optional.empty()),
                    countVarcharColumn.bind(ImmutableList.of(0), Optional.empty()),
                    countBooleanColumn.bind(ImmutableList.of(4), Optional.empty())),
            rowPagesBuilder.getHashChannel(),
            Optional.empty(),
            100_000,
            new DataSize(16, MEGABYTE),
            memoryLimitBeforeSpill > 0,
            succinctBytes(memoryLimitBeforeSpill),
            succinctBytes(memoryLimitForMergeWithMemory),
            spillerFactory,
            joinCompiler);

    MaterializedResult expected = resultBuilder(driverContext.getSession(), VARCHAR, BIGINT, BIGINT, DOUBLE, VARCHAR, BIGINT, BIGINT)
            .row("0", 3L, 0L, 0.0, "300", 3L, 3L)
            .row("1", 3L, 3L, 1.0, "301", 3L, 3L)
            .row("2", 3L, 6L, 2.0, "302", 3L, 3L)
            .row("3", 3L, 9L, 3.0, "303", 3L, 3L)
            .row("4", 3L, 12L, 4.0, "304", 3L, 3L)
            .row("5", 3L, 15L, 5.0, "305", 3L, 3L)
            .row("6", 3L, 18L, 6.0, "306", 3L, 3L)
            .row("7", 3L, 21L, 7.0, "307", 3L, 3L)
            .row("8", 3L, 24L, 8.0, "308", 3L, 3L)
            .row("9", 3L, 27L, 9.0, "309", 3L, 3L)
            .build();

    assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, hashEnabled, Optional.of(hashChannels.size()));
}
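Each addSequencePage(10, ...) call produces ten rows whose columns count up from the given start values, so the expected row for any one group is easy to check by hand. An illustrative check for group "3":

import java.util.stream.LongStream;

public class Group3Check
{
    public static void main(String[] args)
    {
        // channel 3 starts at 0 on each of the three pages, so group "3" sees {3, 3, 3}
        long[] channel3 = {3L, 3L, 3L};
        System.out.println(LongStream.of(channel3).count());                  // 3   (COUNT)
        System.out.println(LongStream.of(channel3).sum());                    // 9   (LONG_SUM)
        System.out.println(LongStream.of(channel3).average().getAsDouble()); // 3.0 (LONG_AVERAGE)
        // channel 2 contributes "103", "203", "303" for this group; the max as VARCHAR is "303",
        // matching the expected row ("3", 3L, 9L, 3.0, "303", 3L, 3L)
    }
}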
Use of io.airlift.units.DataSize in project presto by prestodb.
From class TestLocalExchange, method writeUnblockWhenAllReadersFinishAndPagesConsumed.
@Test
public void writeUnblockWhenAllReadersFinishAndPagesConsumed()
{
    LocalExchange exchange = new LocalExchange(FIXED_BROADCAST_DISTRIBUTION, 2, TYPES, ImmutableList.of(), Optional.empty(), new DataSize(1, BYTE));
    assertEquals(exchange.getBufferCount(), 2);
    assertExchangeTotalBufferedBytes(exchange, 0);

    LocalExchangeSinkFactory sinkFactory = exchange.createSinkFactory();
    LocalExchangeSink sinkA = sinkFactory.createSink();
    assertSinkCanWrite(sinkA);
    LocalExchangeSink sinkB = sinkFactory.createSink();
    assertSinkCanWrite(sinkB);
    sinkFactory.close();
    sinkFactory.noMoreSinkFactories();

    LocalExchangeSource sourceA = exchange.getSource(0);
    assertSource(sourceA, 0);
    LocalExchangeSource sourceB = exchange.getSource(1);
    assertSource(sourceB, 0);

    // the 1-byte buffer is full after a single broadcast page, so both sinks block
    sinkA.addPage(createPage(0));
    ListenableFuture<?> sinkAFuture = assertSinkWriteBlocked(sinkA);
    ListenableFuture<?> sinkBFuture = assertSinkWriteBlocked(sinkB);
    assertSource(sourceA, 1);
    assertSource(sourceB, 1);
    assertExchangeTotalBufferedBytes(exchange, 1);

    // sourceA drains its copy and finishes; sourceB still holds a copy, so writes stay blocked
    sourceA.finish();
    assertSource(sourceA, 1);
    assertRemovePage(sourceA, createPage(0));
    assertSourceFinished(sourceA);
    assertExchangeTotalBufferedBytes(exchange, 1);
    assertSource(sourceB, 1);
    assertSinkWriteBlocked(sinkA);
    assertSinkWriteBlocked(sinkB);

    // once sourceB also drains its copy and finishes, the write futures complete and the sinks finish
    sourceB.finish();
    assertSource(sourceB, 1);
    assertRemovePage(sourceB, createPage(0));
    assertSourceFinished(sourceB);
    assertExchangeTotalBufferedBytes(exchange, 0);
    assertTrue(sinkAFuture.isDone());
    assertTrue(sinkBFuture.isDone());
    assertSinkFinished(sinkA);
    assertSinkFinished(sinkB);
}
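The ListenableFutures returned to blocked writers stay pending while any reader still holds the page and complete once the buffer drains. A minimal sketch of that done/pending contract using Guava's SettableFuture (illustrative only, not LocalExchange's internal code):

import com.google.common.util.concurrent.SettableFuture;

public class WriteBlockedSketch
{
    public static void main(String[] args)
    {
        SettableFuture<Void> writeBlocked = SettableFuture.create();
        System.out.println(writeBlocked.isDone()); // false: buffer full, writer must wait

        // analogous to the last reader removing its copy of the page
        writeBlocked.set(null);
        System.out.println(writeBlocked.isDone()); // true: writer may proceed
    }
}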