Use of io.trino.spi.connector.ConnectorBucketNodeMap in project trino by trinodb.
Class NodePartitioningManager, method getConnectorBucketNodeMap.
public ConnectorBucketNodeMap getConnectorBucketNodeMap(Session session, PartitioningHandle partitioningHandle)
{
    CatalogName catalogName = partitioningHandle.getConnectorId()
            .orElseThrow(() -> new IllegalArgumentException("No connector ID for partitioning handle: " + partitioningHandle));
    ConnectorNodePartitioningProvider partitioningProvider = getPartitioningProvider(catalogName);

    ConnectorBucketNodeMap connectorBucketNodeMap = partitioningProvider.getBucketNodeMap(
            partitioningHandle.getTransactionHandle()
                    .orElseThrow(() -> new IllegalArgumentException("No transactionHandle for partitioning handle: " + partitioningHandle)),
            session.toConnectorSession(catalogName),
            partitioningHandle.getConnectorHandle());
    checkArgument(connectorBucketNodeMap != null, "No partition map %s", partitioningHandle);
    return connectorBucketNodeMap;
}
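On the connector side, the map returned by this SPI call is typically built with the static factories on ConnectorBucketNodeMap. A minimal sketch, assuming the createBucketNodeMap(int) and createBucketNodeMap(List&lt;Node&gt;) factories from the trino-spi module:

import java.util.List;

import io.trino.spi.Node;
import io.trino.spi.connector.ConnectorBucketNodeMap;

public class BucketNodeMapFactories
{
    // Bucket-count-only map: hasFixedMapping() is false, so the engine
    // (NodePartitioningManager) decides which node owns each bucket.
    static ConnectorBucketNodeMap countOnly()
    {
        return ConnectorBucketNodeMap.createBucketNodeMap(8);
    }

    // Fixed map: the connector pins bucket i to bucketOwners.get(i), so
    // hasFixedMapping() is true and the engine honors the assignment.
    static ConnectorBucketNodeMap fixed(List<Node> bucketOwners)
    {
        return ConnectorBucketNodeMap.createBucketNodeMap(bucketOwners);
    }
}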
Use of io.trino.spi.connector.ConnectorBucketNodeMap in project trino by trinodb.
Class NodePartitioningManager, method getBucketNodeMap.
public BucketNodeMap getBucketNodeMap(Session session, PartitioningHandle partitioningHandle, boolean preferDynamic)
{
    ConnectorBucketNodeMap connectorBucketNodeMap = getConnectorBucketNodeMap(session, partitioningHandle);

    if (connectorBucketNodeMap.hasFixedMapping()) {
        return new FixedBucketNodeMap(getSplitToBucket(session, partitioningHandle), getFixedMapping(connectorBucketNodeMap));
    }

    if (preferDynamic) {
        return new DynamicBucketNodeMap(getSplitToBucket(session, partitioningHandle), connectorBucketNodeMap.getBucketCount());
    }

    Optional<CatalogName> catalogName = partitioningHandle.getConnectorId();
    checkArgument(catalogName.isPresent(), "No connector ID for partitioning handle: %s", partitioningHandle);

    return new FixedBucketNodeMap(
            getSplitToBucket(session, partitioningHandle),
            createArbitraryBucketToNode(
                    new ArrayList<>(nodeScheduler.createNodeSelector(session, catalogName).allNodes()),
                    connectorBucketNodeMap.getBucketCount()));
}
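In the last branch, where the connector supplies no fixed mapping and a dynamic map is not preferred, createArbitraryBucketToNode spreads the buckets over the cluster. A stand-alone sketch of that idea (shuffle the nodes once, then assign buckets round-robin), using strings in place of InternalNode:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ArbitraryBucketAssignment
{
    // Illustrative stand-in for createArbitraryBucketToNode: shuffle the
    // node list, then hand out buckets cyclically so they spread evenly.
    static <N> List<N> assignBuckets(List<N> nodes, int bucketCount)
    {
        List<N> shuffled = new ArrayList<>(nodes);
        Collections.shuffle(shuffled);
        List<N> bucketToNode = new ArrayList<>(bucketCount);
        for (int bucket = 0; bucket < bucketCount; bucket++) {
            bucketToNode.add(shuffled.get(bucket % shuffled.size()));
        }
        return bucketToNode;
    }

    public static void main(String[] args)
    {
        // 4 buckets over 2 nodes: each node ends up owning 2 buckets.
        System.out.println(assignBuckets(List.of("node-a", "node-b"), 4));
    }
}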
Use of io.trino.spi.connector.ConnectorBucketNodeMap in project trino by trinodb.
Class NodePartitioningManager, method getNodePartitioningMap.
public NodePartitionMap getNodePartitioningMap(Session session, PartitioningHandle partitioningHandle)
{
    requireNonNull(session, "session is null");
    requireNonNull(partitioningHandle, "partitioningHandle is null");

    if (partitioningHandle.getConnectorHandle() instanceof SystemPartitioningHandle) {
        return ((SystemPartitioningHandle) partitioningHandle.getConnectorHandle()).getNodePartitionMap(session, nodeScheduler);
    }

    ConnectorBucketNodeMap connectorBucketNodeMap = getConnectorBucketNodeMap(session, partitioningHandle);

    // safety check for crazy partitioning
    checkArgument(connectorBucketNodeMap.getBucketCount() < 1_000_000, "Too many buckets in partitioning: %s", connectorBucketNodeMap.getBucketCount());

    List<InternalNode> bucketToNode;
    if (connectorBucketNodeMap.hasFixedMapping()) {
        bucketToNode = getFixedMapping(connectorBucketNodeMap);
    }
    else {
        CatalogName catalogName = partitioningHandle.getConnectorId()
                .orElseThrow(() -> new IllegalArgumentException("No connector ID for partitioning handle: " + partitioningHandle));
        bucketToNode = createArbitraryBucketToNode(
                nodeScheduler.createNodeSelector(session, Optional.of(catalogName)).allNodes(),
                connectorBucketNodeMap.getBucketCount());
    }

    int[] bucketToPartition = new int[connectorBucketNodeMap.getBucketCount()];
    BiMap<InternalNode, Integer> nodeToPartition = HashBiMap.create();
    int nextPartitionId = 0;
    for (int bucket = 0; bucket < bucketToNode.size(); bucket++) {
        InternalNode node = bucketToNode.get(bucket);
        Integer partitionId = nodeToPartition.get(node);
        if (partitionId == null) {
            partitionId = nextPartitionId++;
            nodeToPartition.put(node, partitionId);
        }
        bucketToPartition[bucket] = partitionId;
    }

    List<InternalNode> partitionToNode = IntStream.range(0, nodeToPartition.size())
            .mapToObj(partitionId -> nodeToPartition.inverse().get(partitionId))
            .collect(toImmutableList());

    return new NodePartitionMap(partitionToNode, bucketToPartition, getSplitToBucket(session, partitioningHandle));
}
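The loop above compacts bucket owners into dense partition IDs: the first bucket owned by a previously unseen node mints the next ID, and every later bucket on that node reuses it. A stand-alone sketch of that step, using strings in place of InternalNode:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class BucketToPartitionSketch
{
    public static void main(String[] args)
    {
        // Buckets 0..3 are owned by nodes A, B, A, C.
        List<String> bucketToNode = List.of("A", "B", "A", "C");

        int[] bucketToPartition = new int[bucketToNode.size()];
        Map<String, Integer> nodeToPartition = new LinkedHashMap<>();
        int nextPartitionId = 0;
        for (int bucket = 0; bucket < bucketToNode.size(); bucket++) {
            String node = bucketToNode.get(bucket);
            Integer partitionId = nodeToPartition.get(node);
            if (partitionId == null) {
                // First bucket owned by this node mints the next dense ID.
                partitionId = nextPartitionId++;
                nodeToPartition.put(node, partitionId);
            }
            bucketToPartition[bucket] = partitionId;
        }

        System.out.println(Arrays.toString(bucketToPartition)); // [0, 1, 0, 2]
        // Insertion order yields partitionToNode: [A, B, C]
        System.out.println(new ArrayList<>(nodeToPartition.keySet()));
    }
}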
Use of io.trino.spi.connector.ConnectorBucketNodeMap in project trino by trinodb.
Class DetermineTableScanNodePartitioning, method apply.
@Override
public Result apply(TableScanNode node, Captures captures, Context context)
{
    TableProperties properties = metadata.getTableProperties(context.getSession(), node.getTable());
    if (properties.getTablePartitioning().isEmpty()) {
        return Result.ofPlanNode(node.withUseConnectorNodePartitioning(false));
    }

    TablePartitioning partitioning = properties.getTablePartitioning().get();
    ConnectorBucketNodeMap bucketNodeMap = nodePartitioningManager.getConnectorBucketNodeMap(context.getSession(), partitioning.getPartitioningHandle());
    if (bucketNodeMap.hasFixedMapping()) {
        // use connector table scan node partitioning when bucket to node assignments are fixed
        return Result.ofPlanNode(node.withUseConnectorNodePartitioning(true));
    }

    if (!isUseTableScanNodePartitioning(context.getSession())) {
        return Result.ofPlanNode(node.withUseConnectorNodePartitioning(false));
    }

    int numberOfBuckets = bucketNodeMap.getBucketCount();
    int numberOfTasks = max(taskCountEstimator.estimateSourceDistributedTaskCount(context.getSession()), 1);

    return Result.ofPlanNode(node.withUseConnectorNodePartitioning(
            (double) numberOfBuckets / numberOfTasks >= getTableScanNodePartitioningMinBucketToTaskRatio(context.getSession())));
}
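The final rule keeps connector partitioning only when there are enough buckets per task. An illustrative computation with hypothetical numbers (32 buckets, 10 estimated source tasks, a 0.5 ratio threshold):

public class BucketToTaskRatioSketch
{
    public static void main(String[] args)
    {
        // Hypothetical values; the real ones come from the bucket node map,
        // the task count estimator, and a session property.
        int numberOfBuckets = 32;
        int numberOfTasks = Math.max(10, 1);
        double minBucketToTaskRatio = 0.5;

        // 32 / 10 = 3.2 >= 0.5, so connector node partitioning is kept;
        // with only 4 buckets (4 / 10 = 0.4 < 0.5) it would be dropped,
        // since too few buckets per task would underutilize the cluster.
        boolean useConnectorNodePartitioning = (double) numberOfBuckets / numberOfTasks >= minBucketToTaskRatio;
        System.out.println(useConnectorNodePartitioning); // true
    }
}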
Use of io.trino.spi.connector.ConnectorBucketNodeMap in project trino by trinodb.
Class AbstractTestHive, method testCreateBucketedTableLayout.
@Test
public void testCreateBucketedTableLayout()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        Optional<ConnectorTableLayout> newTableLayout = metadata.getNewTableLayout(
                session,
                new ConnectorTableMetadata(
                        new SchemaTableName("schema", "table"),
                        ImmutableList.of(
                                new ColumnMetadata("column1", BIGINT),
                                new ColumnMetadata("column2", BIGINT)),
                        ImmutableMap.of(
                                PARTITIONED_BY_PROPERTY, ImmutableList.of(),
                                BUCKETED_BY_PROPERTY, ImmutableList.of("column1"),
                                BUCKET_COUNT_PROPERTY, 10,
                                SORTED_BY_PROPERTY, ImmutableList.of())));
        assertTrue(newTableLayout.isPresent());

        ConnectorPartitioningHandle partitioningHandle = new HivePartitioningHandle(BUCKETING_V1, 10, ImmutableList.of(HIVE_LONG), OptionalInt.empty(), false);
        assertEquals(newTableLayout.get().getPartitioning(), Optional.of(partitioningHandle));
        assertEquals(newTableLayout.get().getPartitionColumns(), ImmutableList.of("column1"));

        ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, partitioningHandle);
        assertEquals(connectorBucketNodeMap.getBucketCount(), 10);
        assertFalse(connectorBucketNodeMap.hasFixedMapping());
    }
}
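The last two assertions reflect that Hive's node partitioning provider reports only a bucket count rather than a fixed bucket-to-node assignment, so node placement is left to the engine, as in NodePartitioningManager.getBucketNodeMap above.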