Use of io.trino.operator.InterpretedHashGenerator in project trino by trinodb.
From the class BenchmarkHashBuildAndJoinOperators, method buildHash:
private static void buildHash(BuildContext buildContext, JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactoryManager, List<Integer> outputChannels, int partitionCount) {
    HashBuilderOperatorFactory hashBuilderOperatorFactory = new HashBuilderOperatorFactory(
            HASH_BUILD_OPERATOR_ID,
            TEST_PLAN_NODE_ID,
            lookupSourceFactoryManager,
            outputChannels,
            buildContext.getHashChannels(),
            buildContext.getHashChannel(),
            Optional.empty(),
            Optional.empty(),
            ImmutableList.of(),
            10_000,
            new PagesIndex.TestingFactory(false),
            false,
            SingleStreamSpillerFactory.unsupportedSingleStreamSpillerFactory(),
            incrementalLoadFactorHashArraySizeSupplier(buildContext.getSession()));
    Operator[] operators = IntStream.range(0, partitionCount)
            .mapToObj(i -> buildContext.createTaskContext().addPipelineContext(0, true, true, partitionCount > 1).addDriverContext())
            .map(hashBuilderOperatorFactory::createOperator)
            .toArray(Operator[]::new);
    if (partitionCount == 1) {
        for (Page page : buildContext.getBuildPages()) {
            operators[0].addInput(page);
        }
    }
    else {
        PartitionFunction partitionGenerator = new LocalPartitionGenerator(
                new InterpretedHashGenerator(
                        buildContext.getHashChannels().stream()
                                .map(channel -> buildContext.getTypes().get(channel))
                                .collect(toImmutableList()),
                        buildContext.getHashChannels(),
                        TYPE_OPERATOR_FACTORY),
                partitionCount);
        for (Page page : buildContext.getBuildPages()) {
            Page[] partitionedPages = partitionPages(page, buildContext.getTypes(), partitionCount, partitionGenerator);
            for (int i = 0; i < partitionCount; i++) {
                operators[i].addInput(partitionedPages[i]);
            }
        }
    }
    LookupSourceFactory lookupSourceFactory = lookupSourceFactoryManager.getJoinBridge(Lifespan.taskWide());
    ListenableFuture<LookupSourceProvider> lookupSourceProvider = lookupSourceFactory.createLookupSourceProvider();
    for (Operator operator : operators) {
        operator.finish();
    }
    if (!lookupSourceProvider.isDone()) {
        throw new AssertionError("Expected lookup source provider to be ready");
    }
    getFutureValue(lookupSourceProvider).close();
}
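The partitionPages helper called above is not shown on this page. The following is a minimal sketch, under the assumption that such a helper simply routes each row to the page builder chosen by the PartitionFunction; the name partitionPagesSketch and its signature are illustrative, not the benchmark's actual code.

private static Page[] partitionPagesSketch(Page page, List<Type> types, int partitionCount, PartitionFunction partitionGenerator) {
    // One PageBuilder per output partition.
    PageBuilder[] builders = new PageBuilder[partitionCount];
    for (int i = 0; i < partitionCount; i++) {
        builders[i] = new PageBuilder(types);
    }
    for (int position = 0; position < page.getPositionCount(); position++) {
        // Ask the partition function which partition this row belongs to.
        int partition = partitionGenerator.getPartition(page, position);
        PageBuilder builder = builders[partition];
        builder.declarePosition();
        for (int channel = 0; channel < types.size(); channel++) {
            // Copy the value of this channel into the chosen partition's builder.
            types.get(channel).appendTo(page.getBlock(channel), position, builder.getBlockBuilder(channel));
        }
    }
    return Arrays.stream(builders)
            .map(PageBuilder::build)
            .toArray(Page[]::new);
}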
Use of io.trino.operator.InterpretedHashGenerator in project trino by trinodb.
From the class TestLocalExchange, method assertPartitionedRemovePage:
private static void assertPartitionedRemovePage(LocalExchangeSource source, int partition, int partitionCount) {
    assertTrue(source.waitForReading().isDone());
    Page page = source.removePage();
    assertNotNull(page);
    LocalPartitionGenerator partitionGenerator = new LocalPartitionGenerator(
            new InterpretedHashGenerator(TYPES, new int[] {0}, TYPE_OPERATOR_FACTORY),
            partitionCount);
    for (int position = 0; position < page.getPositionCount(); position++) {
        assertEquals(partitionGenerator.getPartition(page, position), partition);
    }
}
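As a usage note, the same generator configuration can predict the partition of a value before it goes through the exchange. A minimal sketch, assuming TYPES is a single-BIGINT list as in this test's setup (the value 42 and the block-building boilerplate are illustrative):

LocalPartitionGenerator partitionGenerator = new LocalPartitionGenerator(
        new InterpretedHashGenerator(TYPES, new int[] {0}, TYPE_OPERATOR_FACTORY),
        partitionCount);
// Build a one-row BIGINT page holding the value 42 and ask for its partition.
BlockBuilder blockBuilder = BIGINT.createBlockBuilder(null, 1);
BIGINT.writeLong(blockBuilder, 42L);
Page singleRowPage = new Page(blockBuilder.build());
int expectedPartition = partitionGenerator.getPartition(singleRowPage, 0);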
Use of io.trino.operator.InterpretedHashGenerator in project trino by trinodb.
From the class LocalExchange, method createPartitionFunction:
private static PartitionFunction createPartitionFunction(
        NodePartitioningManager nodePartitioningManager,
        Session session,
        BlockTypeOperators blockTypeOperators,
        PartitioningHandle partitioning,
        int partitionCount,
        List<Integer> partitionChannels,
        List<Type> partitionChannelTypes,
        Optional<Integer> partitionHashChannel) {
    checkArgument(Integer.bitCount(partitionCount) == 1, "partitionCount must be a power of 2");
    if (isSystemPartitioning(partitioning)) {
        HashGenerator hashGenerator;
        if (partitionHashChannel.isPresent()) {
            hashGenerator = new PrecomputedHashGenerator(partitionHashChannel.get());
        }
        else {
            hashGenerator = new InterpretedHashGenerator(partitionChannelTypes, Ints.toArray(partitionChannels), blockTypeOperators);
        }
        return new LocalPartitionGenerator(hashGenerator, partitionCount);
    }
    // Distribute buckets assigned to this node among threads.
    // The same bucket function (with the same bucket count) as for node
    // partitioning must be used so that rows within a single bucket
    // are processed by a single thread.
    ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningManager.getConnectorBucketNodeMap(session, partitioning);
    int bucketCount = connectorBucketNodeMap.getBucketCount();
    int[] bucketToPartition = new int[bucketCount];
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        // mix the bucket bits so we don't use the same bucket number used to distribute between stages
        int hashedBucket = (int) XxHash64.hash(Long.reverse(bucket));
        bucketToPartition[bucket] = hashedBucket & (partitionCount - 1);
    }
    return new BucketPartitionFunction(
            nodePartitioningManager.getBucketFunction(session, partitioning, partitionChannelTypes, bucketCount),
            bucketToPartition);
}
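The checkArgument at the top requires partitionCount to be a power of 2 so that masking with (partitionCount - 1) keeps the low bits of the hash, which is equivalent to a non-negative modulo. A minimal, self-contained sketch of that equivalence (the concrete counts are illustrative):

int partitionCount = 8; // must be a power of 2, as enforced above
for (long bucket = 0; bucket < 32; bucket++) {
    int hashedBucket = (int) XxHash64.hash(Long.reverse(bucket));
    // For a power-of-2 partitionCount, the bit mask and floorMod pick the same partition.
    int byMask = hashedBucket & (partitionCount - 1);
    int byMod = Math.floorMod(hashedBucket, partitionCount);
    assert byMask == byMod;
}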