Use of io.trino.spi.type.TypeOperators in project trino by trinodb.
Class TestHashJoinOperator, method setUp:
@BeforeMethod
public void setUp() {
    // Before/AfterMethod is chosen here because the executor needs to be shut down
    // after every single test case to terminate outstanding threads, if any.
    // The line below is the same as newCachedThreadPool(daemonThreadsNamed(...)) except for the RejectedExecutionHandler,
    // which is set to DiscardPolicy (instead of the default AbortPolicy) here.
    // Otherwise, a large number of RejectedExecutionExceptions would flood the logs and cause CI (Travis) failures.
    executor = new ThreadPoolExecutor(
            0,
            Integer.MAX_VALUE,
            60L,
            SECONDS,
            new SynchronousQueue<>(),
            daemonThreadsNamed("test-executor-%s"),
            new ThreadPoolExecutor.DiscardPolicy());
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed(getClass().getSimpleName() + "-scheduledExecutor-%s"));
    NodeScheduler nodeScheduler = new NodeScheduler(new UniformNodeSelectorFactory(
            new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true),
            new NodeTaskMap(new FinalizerService())));
    nodePartitioningManager = new NodePartitioningManager(nodeScheduler, new BlockTypeOperators(new TypeOperators()));
}
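The matching @AfterMethod is not included in this example. A minimal sketch of the shutdown side, assuming executor and scheduledExecutor are the only resources the test holds (the project's actual tearDown may do more), could look like:

// Minimal sketch (assumption, not the project's actual tearDown):
// release the pools created in setUp so no daemon threads outlive the test case.
@AfterMethod(alwaysRun = true)
public void tearDown() {
    executor.shutdownNow();
    scheduledExecutor.shutdownNow();
}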
Use of io.trino.spi.type.TypeOperators in project trino by trinodb.
Class TestValueStore, method setUp:
@BeforeMethod(alwaysRun = true)
public void setUp() {
    VarcharType type = VarcharType.createVarcharType(100);
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(new TypeOperators());
    BlockPositionEqual equalOperator = blockTypeOperators.getEqualOperator(type);
    hashCodeOperator = blockTypeOperators.getHashCodeOperator(type);
    BlockBuilder blockBuilder = type.createBlockBuilder(null, 100, 10);
    valueStore = new ValueStore(type, equalOperator, 100, blockBuilder);
    valueStoreSmall = new ValueStore(type, equalOperator, 1, blockBuilder);
    block = BlockAssertions.createStringsBlock("a", "b", "c", "d");
}
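The tests themselves are omitted here. As a rough illustration of the two block-level operators obtained above, a usage sketch could look like the lines below; the equal(...) and hashCode(...) names are assumed from io.trino.type.BlockTypeOperators, and equalOperator is a local in setUp, so it is treated here as if it were stored in a field.

// Illustrative sketch only: compare and hash positions of the block built in setUp.
Boolean firstEqualsSecond = equalOperator.equal(block, 0, block, 1); // "a" vs "b" -> false
long hashOfFirst = hashCodeOperator.hashCode(block, 0);              // stable hash for "a"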
Use of io.trino.spi.type.TypeOperators in project trino by trinodb.
Class TestMergeOperator, method setUp:
@BeforeMethod
public void setUp() {
    executor = newSingleThreadScheduledExecutor(daemonThreadsNamed("test-merge-operator-%s"));
    serdeFactory = new TestingPagesSerdeFactory();
    taskBuffers = buildNonEvictableCache(CacheBuilder.newBuilder(), CacheLoader.from(TestingTaskBuffer::new));
    httpClient = new TestingHttpClient(new TestingExchangeHttpClientHandler(taskBuffers), executor);
    exchangeClientFactory = new DirectExchangeClientFactory(
            new NodeInfo("test"),
            new FeaturesConfig(),
            new DirectExchangeClientConfig(),
            httpClient,
            executor,
            new ExchangeManagerRegistry(new ExchangeHandleResolver()));
    orderingCompiler = new OrderingCompiler(new TypeOperators());
}
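The cleanup counterpart is not reproduced by this example. A minimal sketch, assuming the test only needs to close the HTTP client and stop the executor (the actual Trino tearDown may release additional resources), might be:

// Minimal sketch (assumption): release the resources created in setUp.
@AfterMethod(alwaysRun = true)
public void tearDown() {
    httpClient.close();     // airlift HttpClient is Closeable
    executor.shutdownNow();
}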
Use of io.trino.spi.type.TypeOperators in project trino by trinodb.
Class TestHashSemiJoinOperator, method setUp:
@BeforeMethod
public void setUp() {
    executor = newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s"));
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed(getClass().getSimpleName() + "-scheduledExecutor-%s"));
    taskContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION);
    typeOperators = new TypeOperators();
    blockTypeOperators = new BlockTypeOperators(typeOperators);
}
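The semi join tests presumably pass these shared factories into the operator factories under test. As a small illustration of what the BlockTypeOperators factory provides (BIGINT is chosen here purely as an example type, not taken from the test):

// Illustrative sketch: per-type block operators derived from the shared factory above.
// Assumes a static import of io.trino.spi.type.BigintType.BIGINT.
BlockPositionEqual bigintEqual = blockTypeOperators.getEqualOperator(BIGINT);
BlockPositionHashCode bigintHash = blockTypeOperators.getHashCodeOperator(BIGINT);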
Use of io.trino.spi.type.TypeOperators in project trino by trinodb.
Class TestGroupedTopNRankBuilder, method testSingleGroupTopN:
@Test(dataProvider = "produceRanking")
public void testSingleGroupTopN(boolean produceRanking) {
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    List<Type> types = ImmutableList.of(DOUBLE);
    GroupedTopNBuilder groupedTopNBuilder = new GroupedTopNRankBuilder(
            types,
            new SimplePageWithPositionComparator(types, ImmutableList.of(0), ImmutableList.of(ASC_NULLS_LAST), typeOperators),
            new SimplePageWithPositionEqualsAndHash(types, ImmutableList.of(0), blockTypeOperators),
            3,
            produceRanking,
            new NoChannelGroupByHash());
    // Expected effect: [0.2 x 1 => rank=1, 0.3 x 2 => rank=2]
    assertTrue(groupedTopNBuilder.processPage(rowPageBuilder(types).row(0.3).row(0.3).row(0.2).build()).process());
    // This page should be dropped, because its single value 0.4 is too large to make the top 3
    assertTrue(groupedTopNBuilder.processPage(rowPageBuilder(types).row(0.4).build()).process());
    // The next page should cause the 0.3 values to be evicted (the first page will be compacted)
    // Expected effect: [0.1 x 2 => rank 1, 0.2 x 3 => rank 3]
    assertTrue(groupedTopNBuilder.processPage(rowPageBuilder(types).row(0.1).row(0.2).row(0.3).row(0.2).row(0.1).build()).process());
    List<Page> output = ImmutableList.copyOf(groupedTopNBuilder.buildResult());
    assertEquals(output.size(), 1);
    List<Type> outputTypes = ImmutableList.of(DOUBLE, BIGINT);
    Page expected = rowPageBuilder(outputTypes)
            .row(0.1, 1)
            .row(0.1, 1)
            .row(0.2, 3)
            .row(0.2, 3)
            .row(0.2, 3)
            .build();
    if (!produceRanking) {
        outputTypes = outputTypes.subList(0, outputTypes.size() - 1);
        expected = dropLastColumn(expected);
    }
    assertPageEquals(outputTypes, getOnlyElement(output), expected);
}
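dropLastColumn is a test helper that this example does not include. A plausible sketch, assuming it simply rebuilds the Page without its trailing ranking channel (an illustration, not the project's actual helper):

// Hypothetical helper sketch: rebuild a Page without its last channel,
// used here when produceRanking is false and the rank column is not expected.
private static Page dropLastColumn(Page page)
{
    Block[] blocks = new Block[page.getChannelCount() - 1];
    for (int channel = 0; channel < blocks.length; channel++) {
        blocks[channel] = page.getBlock(channel);
    }
    return new Page(page.getPositionCount(), blocks);
}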