Use of io.trino.spi.connector.ConnectorPartitioningHandle in the project trino by trinodb:
class RaptorMetadata, method getNewTableLayout.
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata metadata) {
    // Build column handles, assigning sequential column IDs (starting at 1)
    // in declaration order, keyed by column name.
    ImmutableMap.Builder<String, RaptorColumnHandle> handles = ImmutableMap.builder();
    long nextColumnId = 1;
    for (ColumnMetadata column : metadata.getColumns()) {
        handles.put(column.getName(), new RaptorColumnHandle(column.getName(), nextColumnId, column.getType()));
        nextColumnId++;
    }

    // No distribution configured for this table -> no preferred layout.
    return getOrCreateDistribution(handles.buildOrThrow(), metadata.getProperties())
            .map(distribution -> {
                List<String> bucketColumnNames = distribution.getBucketColumns().stream()
                        .map(RaptorColumnHandle::getColumnName)
                        .collect(toList());
                ConnectorPartitioningHandle partitioning = getPartitioningHandle(distribution.getDistributionId());
                return new ConnectorTableLayout(partitioning, bucketColumnNames);
            });
}
Use of io.trino.spi.connector.ConnectorPartitioningHandle in the project trino by trinodb:
class AbstractTestHive, method testCreateBucketedTableLayout.
@Test
public void testCreateBucketedTableLayout() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // Table bucketed on column1 into 10 buckets; no partitioning, no sort columns.
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(
                new SchemaTableName("schema", "table"),
                ImmutableList.of(
                        new ColumnMetadata("column1", BIGINT),
                        new ColumnMetadata("column2", BIGINT)),
                ImmutableMap.of(
                        PARTITIONED_BY_PROPERTY, ImmutableList.of(),
                        BUCKETED_BY_PROPERTY, ImmutableList.of("column1"),
                        BUCKET_COUNT_PROPERTY, 10,
                        SORTED_BY_PROPERTY, ImmutableList.of()));

        Optional<ConnectorTableLayout> layout = metadata.getNewTableLayout(session, tableMetadata);
        assertTrue(layout.isPresent());

        ConnectorPartitioningHandle expectedHandle =
                new HivePartitioningHandle(BUCKETING_V1, 10, ImmutableList.of(HIVE_LONG), OptionalInt.empty(), false);
        assertEquals(layout.get().getPartitioning(), Optional.of(expectedHandle));
        assertEquals(layout.get().getPartitionColumns(), ImmutableList.of("column1"));

        // Bucket-to-node mapping should reflect the declared bucket count and not be fixed.
        ConnectorBucketNodeMap bucketNodeMap =
                nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, expectedHandle);
        assertEquals(bucketNodeMap.getBucketCount(), 10);
        assertFalse(bucketNodeMap.hasFixedMapping());
    }
}
Use of io.trino.spi.connector.ConnectorPartitioningHandle in the project trino by trinodb:
class AbstractTestHive, method insertBucketedTableLayout.
protected void insertBucketedTableLayout(boolean transactional) throws Exception {
    SchemaTableName tableName = temporaryTable("empty_bucketed_table");
    try {
        // Empty unpartitioned table bucketed on column1 into 4 buckets.
        List<Column> columns = ImmutableList.of(
                new Column("column1", HIVE_STRING, Optional.empty()),
                new Column("column2", HIVE_LONG, Optional.empty()));
        HiveBucketProperty bucketProperty =
                new HiveBucketProperty(ImmutableList.of("column1"), BUCKETING_V1, 4, ImmutableList.of());
        createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.of(bucketProperty), transactional);

        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorSession session = newSession();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

            Optional<ConnectorTableLayout> layout = metadata.getInsertLayout(session, tableHandle);
            assertTrue(layout.isPresent());

            // Insert layout must match the table's bucketing property exactly.
            ConnectorPartitioningHandle expectedHandle = new HivePartitioningHandle(
                    bucketProperty.getBucketingVersion(),
                    bucketProperty.getBucketCount(),
                    ImmutableList.of(HIVE_STRING),
                    OptionalInt.empty(),
                    false);
            assertEquals(layout.get().getPartitioning(), Optional.of(expectedHandle));
            assertEquals(layout.get().getPartitionColumns(), ImmutableList.of("column1"));

            ConnectorBucketNodeMap bucketNodeMap =
                    nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, expectedHandle);
            assertEquals(bucketNodeMap.getBucketCount(), 4);
            assertFalse(bucketNodeMap.hasFixedMapping());
        }
    } finally {
        dropTable(tableName);
    }
}
Use of io.trino.spi.connector.ConnectorPartitioningHandle in the project trino by trinodb:
class MetadataManager, method getCommonPartitioning.
@Override
public Optional<PartitioningHandle> getCommonPartitioning(Session session, PartitioningHandle left, PartitioningHandle right) {
    Optional<CatalogName> leftCatalog = left.getConnectorId();
    Optional<CatalogName> rightCatalog = right.getConnectorId();

    // A common partitioning can only exist when both handles belong to the same
    // catalog and the same connector transaction.
    boolean sameCatalog = leftCatalog.isPresent() && rightCatalog.isPresent() && leftCatalog.equals(rightCatalog);
    if (!sameCatalog) {
        return Optional.empty();
    }
    if (!left.getTransactionHandle().equals(right.getTransactionHandle())) {
        return Optional.empty();
    }

    CatalogName catalogName = leftCatalog.get();
    CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName);

    // Delegate to the connector; wrap any common handle back into an engine-level handle.
    Optional<ConnectorPartitioningHandle> commonHandle = metadata.getCommonPartitioningHandle(
            session.toConnectorSession(catalogName),
            left.getConnectorHandle(),
            right.getConnectorHandle());
    return commonHandle.map(handle ->
            new PartitioningHandle(Optional.of(catalogName), left.getTransactionHandle(), handle));
}
Use of io.trino.spi.connector.ConnectorPartitioningHandle in the project trino by trinodb:
class AbstractTestHive, method insertPartitionedBucketedTableLayout.
protected void insertPartitionedBucketedTableLayout(boolean transactional) throws Exception {
    SchemaTableName tableName = temporaryTable("empty_partitioned_table");
    try {
        // Empty table partitioned on column2 and bucketed on column1 into 4 buckets.
        Column partitionColumn = new Column("column2", HIVE_LONG, Optional.empty());
        List<Column> columns = ImmutableList.of(
                new Column("column1", HIVE_STRING, Optional.empty()),
                partitionColumn);
        HiveBucketProperty bucketProperty =
                new HiveBucketProperty(ImmutableList.of("column1"), BUCKETING_V1, 4, ImmutableList.of());
        createEmptyTable(tableName, ORC, columns, ImmutableList.of(partitionColumn), Optional.of(bucketProperty), transactional);

        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorSession session = newSession();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

            Optional<ConnectorTableLayout> layout = metadata.getInsertLayout(session, tableHandle);
            assertTrue(layout.isPresent());

            // For a partitioned bucketed table the handle carries usePartitionedBucketing = true,
            // and the layout lists both the bucket column and the partition column.
            ConnectorPartitioningHandle expectedHandle = new HivePartitioningHandle(
                    bucketProperty.getBucketingVersion(),
                    bucketProperty.getBucketCount(),
                    ImmutableList.of(HIVE_STRING),
                    OptionalInt.empty(),
                    true);
            assertEquals(layout.get().getPartitioning(), Optional.of(expectedHandle));
            assertEquals(layout.get().getPartitionColumns(), ImmutableList.of("column1", "column2"));

            // Partitioned bucketing produces a fixed 32-bucket node mapping.
            ConnectorBucketNodeMap bucketNodeMap =
                    nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, expectedHandle);
            assertEquals(bucketNodeMap.getBucketCount(), 32);
            assertTrue(bucketNodeMap.hasFixedMapping());
            assertEquals(bucketNodeMap.getFixedMapping().size(), 32);
        }
    } finally {
        dropTable(tableName);
    }
}
Aggregations