Use of io.trino.operator.HashGenerator in project trino by trinodb.
Example from class TypeTestUtils, method getHashBlock:
public static Block getHashBlock(List<? extends Type> hashTypes, Block... hashBlocks)
{
    checkArgument(hashTypes.size() == hashBlocks.length);
    HashGenerator hashGenerator = InterpretedHashGenerator.createPositionalWithTypes(ImmutableList.copyOf(hashTypes), TYPE_OPERATOR_FACTORY);
    int positionCount = hashBlocks[0].getPositionCount();
    BlockBuilder builder = BIGINT.createFixedSizeBlockBuilder(positionCount);
    Page page = new Page(hashBlocks);
    for (int i = 0; i < positionCount; i++) {
        // hash all channels at position i and store the combined hash as a BIGINT
        BIGINT.writeLong(builder, hashGenerator.hashPosition(i, page));
    }
    return builder.build();
}
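For context, a minimal sketch of how this helper might be invoked from a test. The three-row BIGINT column is an illustrative assumption, not taken from the Trino test suite; only getHashBlock itself comes from the example above.

// Hypothetical usage sketch: build a small BIGINT column, then derive the
// companion hash block that operators with a precomputed hash channel expect.
BlockBuilder data = BIGINT.createBlockBuilder(null, 3);
BIGINT.writeLong(data, 1);
BIGINT.writeLong(data, 2);
BIGINT.writeLong(data, 3);
Block hashBlock = TypeTestUtils.getHashBlock(ImmutableList.of(BIGINT), data.build());
// hashBlock holds one 64-bit hash per input position (getPositionCount() == 3)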
Example from class LocalExchange, method createPartitionFunction:
private static PartitionFunction createPartitionFunction(
        NodePartitioningManager nodePartitioningManager,
        Session session,
        BlockTypeOperators blockTypeOperators,
        PartitioningHandle partitioning,
        int partitionCount,
        List<Integer> partitionChannels,
        List<Type> partitionChannelTypes,
        Optional<Integer> partitionHashChannel)
{
    checkArgument(Integer.bitCount(partitionCount) == 1, "partitionCount must be a power of 2");
    if (isSystemPartitioning(partitioning)) {
        HashGenerator hashGenerator;
        if (partitionHashChannel.isPresent()) {
            // a hash channel was already computed upstream; reuse it directly
            hashGenerator = new PrecomputedHashGenerator(partitionHashChannel.get());
        }
        else {
            // no precomputed hash: hash the partition channels on the fly
            hashGenerator = new InterpretedHashGenerator(partitionChannelTypes, Ints.toArray(partitionChannels), blockTypeOperators);
        }
        return new LocalPartitionGenerator(hashGenerator, partitionCount);
    }

    // Distribute buckets assigned to this node among threads.
    // The same bucket function (with the same bucket count) as for node
    // partitioning must be used. This way rows within a single bucket
    // will be processed by a single thread.
    ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningManager.getConnectorBucketNodeMap(session, partitioning);
    int bucketCount = connectorBucketNodeMap.getBucketCount();
    int[] bucketToPartition = new int[bucketCount];
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        // mix the bucket bits so we don't use the same bucket number used to distribute between stages
        int hashedBucket = (int) XxHash64.hash(Long.reverse(bucket));
        bucketToPartition[bucket] = hashedBucket & (partitionCount - 1);
    }
    return new BucketPartitionFunction(nodePartitioningManager.getBucketFunction(session, partitioning, partitionChannelTypes, bucketCount), bucketToPartition);
}
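The power-of-two check at the top is what makes the final bitmask valid: when n is a power of two, x & (n - 1) equals Math.floorMod(x, n) for any x, and Long.reverse followed by XxHash64 decorrelates this local assignment from the bucket numbering already used to distribute data between stages. A standalone sketch of just that mixing step; BucketMixingDemo is a hypothetical class for illustration, not part of Trino:

import io.airlift.slice.XxHash64;

// Hypothetical demo of the bucket-to-partition mixing used above.
public class BucketMixingDemo
{
    public static void main(String[] args)
    {
        int partitionCount = 8; // must be a power of two for the mask trick

        for (int bucket = 0; bucket < 4; bucket++) {
            // reverse the bits so consecutive bucket numbers, already used to
            // spread data between stages, hash very differently here
            int hashedBucket = (int) XxHash64.hash(Long.reverse(bucket));

            // with a power-of-two partitionCount, masking with (partitionCount - 1)
            // keeps the low bits and matches floorMod on the hash
            int partition = hashedBucket & (partitionCount - 1);
            assert partition == Math.floorMod(hashedBucket, partitionCount);

            System.out.printf("bucket %d -> partition %d%n", bucket, partition);
        }
    }
}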