use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb.
the class HiveMetadata method getNewTableLayout.
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    validateTimestampColumns(tableMetadata.getColumns(), getTimestampPrecision(session));
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateColumns(tableMetadata);
    Optional<HiveBucketProperty> bucketProperty = getBucketProperty(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    if (bucketProperty.isEmpty()) {
        // return preferred layout which is partitioned by partition columns
        if (partitionedBy.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(new ConnectorTableLayout(partitionedBy));
    }
    if (!bucketProperty.get().getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new TrinoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    List<String> bucketedBy = bucketProperty.get().getBucketedBy();
    Map<String, HiveType> hiveTypeMap = tableMetadata.getColumns().stream()
            .collect(toMap(ColumnMetadata::getName, column -> toHiveType(column.getType())));
    return Optional.of(new ConnectorTableLayout(
            new HivePartitioningHandle(
                    bucketProperty.get().getBucketingVersion(),
                    bucketProperty.get().getBucketCount(),
                    bucketedBy.stream()
                            .map(hiveTypeMap::get)
                            .collect(toImmutableList()),
                    OptionalInt.of(bucketProperty.get().getBucketCount()),
                    !partitionedBy.isEmpty() && isParallelPartitionedBucketedWrites(session)),
            ImmutableList.<String>builder()
                    .addAll(bucketedBy)
                    .addAll(partitionedBy)
                    .build()));
}
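The method above uses both shapes of ConnectorTableLayout that recur throughout these examples: a partition-column-only layout, and a layout carrying a connector partitioning handle. A minimal sketch of the distinction, extracted into a hypothetical helper (only the two SPI types are real; the helper and its names are illustrative):

import java.util.List;
import java.util.Optional;

import io.trino.spi.connector.ConnectorPartitioningHandle;
import io.trino.spi.connector.ConnectorTableLayout;

// Hypothetical helper; ConnectorTableLayout and ConnectorPartitioningHandle are the real SPI types.
final class LayoutShapes
{
    private LayoutShapes() {}

    // Column-only layout: the engine groups rows by these columns before writing,
    // but the connector does not control how groups are distributed across writers.
    static Optional<ConnectorTableLayout> columnsOnly(List<String> partitionedBy)
    {
        if (partitionedBy.isEmpty()) {
            return Optional.empty(); // no preferred layout at all
        }
        return Optional.of(new ConnectorTableLayout(partitionedBy));
    }

    // Handle-based layout: the partitioning handle (e.g. HivePartitioningHandle above)
    // lets the connector dictate the exact distribution, such as Hive bucketing.
    static ConnectorTableLayout withHandle(ConnectorPartitioningHandle handle, List<String> columns)
    {
        return new ConnectorTableLayout(handle, columns);
    }
}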
use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb.
the class HiveMetadata method getInsertLayout.
@Override
public Optional<ConnectorTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    if (table.getStorage().getBucketProperty().isPresent()) {
        if (!isSupportedBucketing(table)) {
            throw new TrinoException(NOT_SUPPORTED, "Cannot write to a table bucketed on an unsupported type");
        }
    }
    // Note: we cannot use hiveTableHandle.isInAcidTransaction() here as transaction is not yet set in HiveTableHandle when getInsertLayout is called
    else if (isFullAcidTable(table.getParameters())) {
        table = Table.builder(table)
                .withStorage(storage -> storage.setBucketProperty(Optional.of(
                        new HiveBucketProperty(ImmutableList.of(), HiveBucketing.BucketingVersion.BUCKETING_V2, 1, ImmutableList.of()))))
                .build();
    }
    Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(session, table, typeManager);
    List<Column> partitionColumns = table.getPartitionColumns();
    if (hiveBucketHandle.isEmpty()) {
        // return preferred layout which is partitioned by partition columns
        if (partitionColumns.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(new ConnectorTableLayout(partitionColumns.stream()
                .map(Column::getName)
                .collect(toImmutableList())));
    }
    HiveBucketProperty bucketProperty = table.getStorage().getBucketProperty()
            .orElseThrow(() -> new NoSuchElementException("Bucket property should be set"));
    if (!bucketProperty.getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new TrinoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    ImmutableList.Builder<String> partitioningColumns = ImmutableList.builder();
    hiveBucketHandle.get().getColumns().stream().map(HiveColumnHandle::getName).forEach(partitioningColumns::add);
    partitionColumns.stream().map(Column::getName).forEach(partitioningColumns::add);
    HivePartitioningHandle partitioningHandle = new HivePartitioningHandle(
            hiveBucketHandle.get().getBucketingVersion(),
            hiveBucketHandle.get().getTableBucketCount(),
            hiveBucketHandle.get().getColumns().stream()
                    .map(HiveColumnHandle::getHiveType)
                    .collect(toImmutableList()),
            OptionalInt.of(hiveBucketHandle.get().getTableBucketCount()),
            !partitionColumns.isEmpty() && isParallelPartitionedBucketedWrites(session));
    return Optional.of(new ConnectorTableLayout(partitioningHandle, partitioningColumns.build()));
}
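One branch above is worth highlighting: a full-ACID table without explicit bucketing is given an implicit single-bucket property, so inserts still flow through the bucketed write path. The rewrite in isolation, annotated (a restatement of the branch above, not new behavior):

// Implicit bucket property for an unbucketed full-ACID table, as in the branch above:
// no bucketing columns, bucketing version 2, exactly one bucket, no sort columns.
table = Table.builder(table)
        .withStorage(storage -> storage.setBucketProperty(Optional.of(
                new HiveBucketProperty(
                        ImmutableList.of(),                           // bucketedBy: none
                        HiveBucketing.BucketingVersion.BUCKETING_V2,  // version used for ACID writes
                        1,                                            // bucketCount: a single bucket
                        ImmutableList.of()))))                        // sortedBy: none
        .build();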
use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb.
the class AbstractTestHive method testPreferredInsertLayout.
@Test
public void testPreferredInsertLayout()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_partitioned_table");
    try {
        Column partitioningColumn = new Column("column2", HIVE_STRING, Optional.empty());
        List<Column> columns = ImmutableList.of(
                new Column("column1", HIVE_STRING, Optional.empty()),
                partitioningColumn);
        createEmptyTable(tableName, ORC, columns, ImmutableList.of(partitioningColumn));
        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorSession session = newSession();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            Optional<ConnectorTableLayout> insertLayout = metadata.getInsertLayout(session, tableHandle);
            assertTrue(insertLayout.isPresent());
            assertFalse(insertLayout.get().getPartitioning().isPresent());
            assertEquals(insertLayout.get().getPartitionColumns(), ImmutableList.of(partitioningColumn.getName()));
        }
    }
    finally {
        dropTable(tableName);
    }
}
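A companion check for the bucketed path might look like the sketch below. The bucketedTableName fixture is an assumption (the test above only creates a partitioned table), but the assertions invert naturally: once a bucket handle is involved, the layout must carry a partitioning handle.

// Hypothetical companion assertion block: for a bucketed table, getInsertLayout
// should return a layout whose partitioning handle is present (contrast with above).
try (Transaction transaction = newTransaction()) {
    ConnectorMetadata metadata = transaction.getMetadata();
    ConnectorSession session = newSession();
    ConnectorTableHandle tableHandle = getTableHandle(metadata, bucketedTableName); // assumed fixture
    Optional<ConnectorTableLayout> insertLayout = metadata.getInsertLayout(session, tableHandle);
    assertTrue(insertLayout.isPresent());
    assertTrue(insertLayout.get().getPartitioning().isPresent());
}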
use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb.
the class BlackHoleMetadata method getNewTableLayout.
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession connectorSession, ConnectorTableMetadata tableMetadata)
{
    @SuppressWarnings("unchecked")
    List<String> distributeColumns = (List<String>) tableMetadata.getProperties().get(DISTRIBUTED_ON);
    if (distributeColumns.isEmpty()) {
        return Optional.empty();
    }
    Set<String> undefinedColumns = Sets.difference(
            ImmutableSet.copyOf(distributeColumns),
            tableMetadata.getColumns().stream()
                    .map(ColumnMetadata::getName)
                    .collect(toSet()));
    if (!undefinedColumns.isEmpty()) {
        throw new TrinoException(INVALID_TABLE_PROPERTY, "Distribute columns not defined on table: " + undefinedColumns);
    }
    return Optional.of(new ConnectorTableLayout(BlackHolePartitioningHandle.INSTANCE, distributeColumns));
}
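To see this method fire, the table metadata needs the distributed-on property populated. A hedged construction sketch (the "distributed_on" key string is an assumption standing in for BlackHoleMetadata's DISTRIBUTED_ON constant; the table and column names are illustrative):

// Hypothetical table metadata distributed on "id".
ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(
        new SchemaTableName("default", "sink"),
        ImmutableList.of(
                new ColumnMetadata("id", VarcharType.VARCHAR),
                new ColumnMetadata("value", VarcharType.VARCHAR)),
        ImmutableMap.of("distributed_on", ImmutableList.of("id")));
// getNewTableLayout then returns a layout pairing BlackHolePartitioningHandle.INSTANCE
// with partition columns ["id"]; an unknown column name would raise INVALID_TABLE_PROPERTY.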
use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb.
the class DeltaLakeMetadata method getNewTableLayout.
/**
 * Provides partitioning scheme of table for query planner to decide how to
 * write to multiple partitions.
 */
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    validateTableColumns(tableMetadata);
    List<String> partitionColumnNames = getPartitionedBy(tableMetadata.getProperties());
    if (partitionColumnNames.isEmpty()) {
        return Optional.empty();
    }
    return Optional.of(new ConnectorTableLayout(partitionColumnNames));
}
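Delta Lake only ever uses the column-only shape: it returns no partitioning handle, leaving the engine free to distribute rows across writers as it sees fit. A hedged sanity check of that contract (verify is Guava's com.google.common.base.Verify.verify; metadata, session, and tableMetadata are assumed to be in scope):

// Hypothetical check of the contract above: unpartitioned tables get no preferred
// layout; partitioned tables get a column-only layout without a partitioning handle.
Optional<ConnectorTableLayout> layout = metadata.getNewTableLayout(session, tableMetadata);
if (getPartitionedBy(tableMetadata.getProperties()).isEmpty()) {
    verify(layout.isEmpty(), "expected no preferred layout for an unpartitioned table");
}
else {
    verify(layout.isPresent() && layout.get().getPartitioning().isEmpty(),
            "expected a column-only layout without a partitioning handle");
}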