Example usage of io.trino.spi.type.TypeOperators from the trinodb/trino project,
taken from the setUp method of the TestDistinctLimitOperator class.
@BeforeMethod
public void setUp() {
    // Dedicated daemon-thread executors so a hanging test cannot keep the JVM alive.
    executor = newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s"));
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed(getClass().getSimpleName() + "-scheduledExecutor-%s"));

    // Fresh driver context for every test method.
    driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION)
            .addPipelineContext(0, true, true, false)
            .addDriverContext();

    // Share one TypeOperators instance between both collaborators.
    TypeOperators operators = new TypeOperators();
    joinCompiler = new JoinCompiler(operators);
    blockTypeOperators = new BlockTypeOperators(operators);
}
Example usage of io.trino.spi.type.TypeOperators from the trinodb/trino project,
taken from the testHashRowWithMapType method of the TestSimplePagesHashStrategy class.
@Test
public void testHashRowWithMapType() {
    // Build a map block containing the single entry {1234 -> 5678}.
    MapType mapType = new MapType(INTEGER, INTEGER, new TypeOperators());
    Block keys = new IntArrayBlock(1, Optional.empty(), new int[] {1234});
    Block values = new IntArrayBlock(1, Optional.empty(), new int[] {5678});
    Block block = mapType.createBlockFromKeyValue(Optional.empty(), new int[] {0, 1}, keys, values);

    SimplePagesHashStrategy strategy = createSimplePagesHashStrategy(mapType, ImmutableList.of(block));

    // Hashing succeeds because MapType is comparable; pin the exact hash value.
    assertEquals(strategy.hashRow(0, new Page(block)), 451258269207618863L);
}
Example usage of io.trino.spi.type.TypeOperators from the trinodb/trino project,
taken from the setUp method of the TestStreamingAggregationOperator class.
@BeforeMethod
public void setUp() {
    // Daemon-thread executors backing the test task context.
    executor = newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s"));
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed(getClass().getSimpleName() + "-scheduledExecutor-%s"));
    driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION)
            .addPipelineContext(0, true, true, false)
            .addDriverContext();

    // Streaming aggregation grouped on channel 1 (VARCHAR):
    // count over channel 0 and long sum over channel 2, both in SINGLE step.
    operatorFactory = StreamingAggregationOperator.createOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BOOLEAN, VARCHAR, BIGINT),
            ImmutableList.of(VARCHAR),
            ImmutableList.of(1),
            ImmutableList.of(
                    COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()),
                    LONG_SUM.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty())),
            new JoinCompiler(new TypeOperators()));
}
Example usage of io.trino.spi.type.TypeOperators from the trinodb/trino project,
taken from the testYield method of the TestGroupedTopNRankBuilder class.
@Test
public void testYield() {
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockOperators = new BlockTypeOperators(typeOperators);
    List<Type> types = ImmutableList.of(BIGINT, DOUBLE);

    // One page, one group (key = 1), doubles deliberately out of order.
    Page input = rowPagesBuilder(types)
            .row(1L, 0.3)
            .row(1L, 0.2)
            .row(1L, 0.9)
            .row(1L, 0.1)
            .build()
            .get(0);
    input.compact();

    // The group-by hash yields until this flag is flipped.
    AtomicBoolean unblocked = new AtomicBoolean();
    GroupByHash groupByHash = createGroupByHash(ImmutableList.of(types.get(0)), ImmutableList.of(0), unblocked::get, typeOperators, blockOperators);
    GroupedTopNBuilder builder = new GroupedTopNRankBuilder(
            types,
            new SimplePageWithPositionComparator(types, ImmutableList.of(1), ImmutableList.of(ASC_NULLS_LAST), typeOperators),
            new SimplePageWithPositionEqualsAndHash(types, ImmutableList.of(1), blockOperators),
            5,
            false,
            groupByHash);

    // While blocked, process() keeps returning false; once unblocked it completes.
    Work<?> work = builder.processPage(input);
    assertFalse(work.process());
    assertFalse(work.process());
    unblocked.set(true);
    assertTrue(work.process());

    // Output is a single page sorted ascending by the double channel.
    List<Page> output = ImmutableList.copyOf(builder.buildResult());
    assertEquals(output.size(), 1);
    Page expected = rowPagesBuilder(types)
            .row(1L, 0.1)
            .row(1L, 0.2)
            .row(1L, 0.3)
            .row(1L, 0.9)
            .build()
            .get(0);
    assertPageEquals(types, output.get(0), expected);
}
Example usage of io.trino.spi.type.TypeOperators from the trinodb/trino project,
taken from the testMultiGroupTopN method of the TestGroupedTopNRankBuilder class.
@Test(dataProvider = "produceRanking")
public void testMultiGroupTopN(boolean produceRanking) {
// Exercises GroupedTopNRankBuilder with rank limit 3 across two groups keyed on
// channel 0 (BIGINT), ordered ascending by channel 1 (DOUBLE). The data provider
// toggles whether the rank column is appended to the output.
TypeOperators typeOperators = new TypeOperators();
BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
List<Type> types = ImmutableList.of(BIGINT, DOUBLE);
// NOOP yield signal: the hash never blocks in this test (unlike testYield).
GroupByHash groupByHash = createGroupByHash(ImmutableList.of(types.get(0)), ImmutableList.of(0), NOOP, typeOperators, blockTypeOperators);
GroupedTopNBuilder groupedTopNBuilder = new GroupedTopNRankBuilder(types, new SimplePageWithPositionComparator(types, ImmutableList.of(1), ImmutableList.of(ASC_NULLS_LAST), typeOperators), new SimplePageWithPositionEqualsAndHash(types, ImmutableList.of(1), blockTypeOperators), 3, produceRanking, groupByHash);
// Expected effect:
// Group 0 [0.2 x 1 => rank=1, 0.3 x 3 => rank=2]
// Group 1 [0.2 x 1 => rank=1]
assertTrue(groupedTopNBuilder.processPage(rowPageBuilder(types).row(0L, 0.3).row(0L, 0.3).row(0L, 0.3).row(0L, 0.2).row(1L, 0.2).build()).process());
// Page should be dropped, because all values too large to be considered
assertTrue(groupedTopNBuilder.processPage(rowPageBuilder(types).row(0L, 0.4).row(1L, 0.4).build()).process());
// Next page should cause evict 0.3 from group 0, which should cause the first page to be compacted
// Expected effect:
// Group 0 [0.1 x 1 => rank=1, 0.2 x 2 => rank=2]
// Group 1 [0.2 x 2 => rank=1, 0.3 x 2 => rank=3]
assertTrue(groupedTopNBuilder.processPage(rowPageBuilder(types).row(0L, 0.1).row(1L, 0.2).row(0L, 0.3).row(0L, 0.2).row(1L, 0.5).row(1L, 0.4).row(1L, 0.3).row(1L, 0.3).build()).process());
// All surviving rows fit into one page; third column is the rank.
List<Page> output = ImmutableList.copyOf(groupedTopNBuilder.buildResult());
assertEquals(output.size(), 1);
List<Type> outputTypes = ImmutableList.of(BIGINT, DOUBLE, BIGINT);
Page expected = rowPageBuilder(outputTypes).row(0, 0.1, 1).row(0, 0.2, 2).row(0, 0.2, 2).row(1, 0.2, 1).row(1, 0.2, 1).row(1, 0.3, 3).row(1, 0.3, 3).build();
// Without ranking output, compare against the same page minus the rank column.
if (!produceRanking) {
outputTypes = outputTypes.subList(0, outputTypes.size() - 1);
expected = dropLastColumn(expected);
}
assertPageEquals(outputTypes, getOnlyElement(output), expected);
}
Aggregations