Example use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb: method getInsertLayout of class DeltaLakeMetadata.
/**
 * Returns the preferred layout for INSERT into a Delta Lake table.
 *
 * <p>Partitioned tables are laid out by their canonical partition columns;
 * unpartitioned tables report no preferred layout.
 */
@Override
public Optional<ConnectorTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle) {
    DeltaLakeTableHandle handle = (DeltaLakeTableHandle) tableHandle;
    List<String> partitionColumns = handle.getMetadataEntry().getCanonicalPartitionColumns();
    return partitionColumns.isEmpty()
            ? Optional.empty()
            : Optional.of(new ConnectorTableLayout(partitionColumns));
}
Example use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb: method getNewTableLayout of class RaptorMetadata.
/**
 * Returns the preferred layout for a new Raptor table.
 *
 * <p>Builds column handles with sequential ids (starting at 1, in declaration
 * order), then consults the distribution defined by the table properties. A
 * layout is only reported when a distribution exists; it carries the bucketing
 * partitioning handle and the bucket column names.
 */
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata metadata) {
    ImmutableMap.Builder<String, RaptorColumnHandle> handlesByName = ImmutableMap.builder();
    long nextColumnId = 1;
    for (ColumnMetadata column : metadata.getColumns()) {
        handlesByName.put(column.getName(), new RaptorColumnHandle(column.getName(), nextColumnId, column.getType()));
        nextColumnId++;
    }
    return getOrCreateDistribution(handlesByName.buildOrThrow(), metadata.getProperties())
            .map(info -> new ConnectorTableLayout(
                    getPartitioningHandle(info.getDistributionId()),
                    info.getBucketColumns().stream()
                            .map(RaptorColumnHandle::getColumnName)
                            .collect(toList())));
}
Example use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb: method beginCreateTable of class RaptorMetadata.
// Begins a CREATE TABLE: validates columns and table properties, opens a shard
// transaction, and returns the output handle consumed by the write path.
@Override
public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorTableLayout> layout) {
// A view with the same name blocks table creation.
if (viewExists(session, tableMetadata.getTable())) {
throw new TrinoException(ALREADY_EXISTS, "View already exists: " + tableMetadata.getTable());
}
// NOTE(review): the unchecked Optional::get assumes any layout passed in
// always carries a partitioning handle (i.e. getNewTableLayout never returns
// a layout without one) -- confirm against getNewTableLayout.
Optional<RaptorPartitioningHandle> partitioning = layout.map(ConnectorTableLayout::getPartitioning).map(Optional::get).map(RaptorPartitioningHandle.class::cast);
// Assign sequential column ids starting at 1, in declaration order.
ImmutableList.Builder<RaptorColumnHandle> columnHandles = ImmutableList.builder();
ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
long columnId = 1;
for (ColumnMetadata column : tableMetadata.getColumns()) {
columnHandles.add(new RaptorColumnHandle(column.getName(), columnId, column.getType()));
columnTypes.add(column.getType());
columnId++;
}
Map<String, RaptorColumnHandle> columnHandleMap = Maps.uniqueIndex(columnHandles.build(), RaptorColumnHandle::getColumnName);
// Resolve the sort and temporal columns named in the table properties to handles.
List<RaptorColumnHandle> sortColumnHandles = getSortColumnHandles(getSortColumns(tableMetadata.getProperties()), columnHandleMap);
Optional<RaptorColumnHandle> temporalColumnHandle = getTemporalColumnHandle(getTemporalColumn(tableMetadata.getProperties()), columnHandleMap);
// The temporal column, when present, must be a millisecond timestamp or a date.
if (temporalColumnHandle.isPresent()) {
RaptorColumnHandle column = temporalColumnHandle.get();
if (!column.getColumnType().equals(TIMESTAMP_MILLIS) && !column.getColumnType().equals(DATE)) {
throw new TrinoException(NOT_SUPPORTED, "Temporal column must be of type timestamp or date: " + column.getColumnName());
}
}
boolean organized = isOrganized(tableMetadata.getProperties());
// Organized tables must have sort columns and must not have a temporal column.
if (organized) {
if (temporalColumnHandle.isPresent()) {
throw new TrinoException(NOT_SUPPORTED, "Table with temporal columns cannot be organized");
}
if (sortColumnHandles.isEmpty()) {
throw new TrinoException(NOT_SUPPORTED, "Table organization requires an ordering");
}
}
// Open the shard transaction only after all validation has passed, and record
// its id on this metadata instance for commit/rollback.
long transactionId = shardManager.beginTransaction();
setTransactionId(transactionId);
Optional<DistributionInfo> distribution = partitioning.map(handle -> getDistributionInfo(handle.getDistributionId(), columnHandleMap, tableMetadata.getProperties()));
// Every sort column is ordered ASC NULLS FIRST; distribution id/bucket count/
// bucket columns are empty when no partitioning was supplied.
return new RaptorOutputTableHandle(transactionId, tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName(), columnHandles.build(), columnTypes.build(), sortColumnHandles, nCopies(sortColumnHandles.size(), ASC_NULLS_FIRST), temporalColumnHandle, distribution.map(info -> OptionalLong.of(info.getDistributionId())).orElse(OptionalLong.empty()), distribution.map(info -> OptionalInt.of(info.getBucketCount())).orElse(OptionalInt.empty()), organized, distribution.map(DistributionInfo::getBucketColumns).orElse(ImmutableList.of()));
}
Example use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb: method getWriteLayout of class IcebergMetadata.
/**
 * Computes the write layout for an Iceberg table from its partition spec.
 *
 * <p>Unpartitioned specs yield no layout. Otherwise the source columns of the
 * partition fields (ordered by source id, de-duplicated) become the partition
 * column names. A partitioning handle is attached only when repartitioning is
 * forced or some partition transform is not an identity.
 */
private Optional<ConnectorTableLayout> getWriteLayout(Schema tableSchema, PartitionSpec partitionSpec, boolean forceRepartitioning) {
    if (partitionSpec.isUnpartitioned()) {
        return Optional.empty();
    }
    Map<Integer, IcebergColumnHandle> columnById = getColumns(tableSchema, typeManager).stream()
            .collect(toImmutableMap(IcebergColumnHandle::getId, identity()));
    List<IcebergColumnHandle> partitioningColumns = partitionSpec.fields().stream()
            .sorted(Comparator.comparing(PartitionField::sourceId))
            .map(field -> requireNonNull(columnById.get(field.sourceId()), () -> "Cannot find source column for partitioning field " + field))
            .distinct()
            .collect(toImmutableList());
    List<String> partitioningColumnNames = partitioningColumns.stream()
            .map(IcebergColumnHandle::getName)
            .collect(toImmutableList());
    boolean allIdentityTransforms = partitionSpec.fields().stream()
            .allMatch(field -> field.transform().isIdentity());
    if (!forceRepartitioning && allIdentityTransforms) {
        // Do not set partitioningHandle, to let engine determine whether to repartition data or not, on stat-based basis.
        return Optional.of(new ConnectorTableLayout(partitioningColumnNames));
    }
    return Optional.of(new ConnectorTableLayout(
            new IcebergPartitioningHandle(toPartitionFields(partitionSpec), partitioningColumns),
            partitioningColumnNames));
}
Example use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb: test method testPreferredCreateTableLayout of class AbstractTestHive.
// Verifies that a table partitioned on "column2" (and explicitly not bucketed)
// reports a layout with that partition column and no partitioning handle.
@Test
public void testPreferredCreateTableLayout() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(
                new SchemaTableName("schema", "table"),
                ImmutableList.of(
                        new ColumnMetadata("column1", BIGINT),
                        new ColumnMetadata("column2", BIGINT)),
                ImmutableMap.of(
                        PARTITIONED_BY_PROPERTY, ImmutableList.of("column2"),
                        BUCKETED_BY_PROPERTY, ImmutableList.of(),
                        BUCKET_COUNT_PROPERTY, 0,
                        SORTED_BY_PROPERTY, ImmutableList.of()));
        Optional<ConnectorTableLayout> newTableLayout = metadata.getNewTableLayout(session, tableMetadata);
        assertTrue(newTableLayout.isPresent());
        assertFalse(newTableLayout.get().getPartitioning().isPresent());
        assertEquals(newTableLayout.get().getPartitionColumns(), ImmutableList.of("column2"));
    }
}
Aggregations