Example of using io.trino.spi.connector.ColumnMetadata in the Trino project (trinodb).
Source: class AbstractTestHiveFileSystemAbfs, method ensureTableExists.
/**
 * Creates the given external TEXTFILE table (single BIGINT column {@code t_bigint})
 * in the metastore if it does not already exist, then commits the transaction.
 *
 * @param table schema-qualified name of the table to ensure
 * @param tableDirectoryName directory (relative to the test base path) backing the external table
 * @param tableProperties extra table properties merged in ahead of the fixed test defaults
 */
private void ensureTableExists(SchemaTableName table, String tableDirectoryName, Map<String, Object> tableProperties) {
    try (Transaction transaction = newTransaction()) {
        // Caller-supplied properties first, then the fixed settings this test relies on:
        // TEXTFILE storage, an external location under the test base path, and no bucketing.
        Map<String, Object> properties = ImmutableMap.<String, Object>builder()
                .putAll(tableProperties)
                .put(STORAGE_FORMAT_PROPERTY, HiveStorageFormat.TEXTFILE)
                .put(EXTERNAL_LOCATION_PROPERTY, getBasePath().toString() + "/" + tableDirectoryName)
                .put(BUCKET_COUNT_PROPERTY, 0)
                .put(BUCKETED_BY_PROPERTY, ImmutableList.of())
                .put(SORTED_BY_PROPERTY, ImmutableList.of())
                .buildOrThrow();
        ConnectorTableMetadata definition = new ConnectorTableMetadata(
                table,
                ImmutableList.of(new ColumnMetadata("t_bigint", BIGINT)),
                properties);
        boolean alreadyExists = transaction.getMetadata()
                .listTables(newSession(), Optional.of(table.getSchemaName()))
                .contains(table);
        if (!alreadyExists) {
            transaction.getMetadata().createTable(newSession(), definition, false);
        }
        transaction.commit();
    }
}
Example of using io.trino.spi.connector.ColumnMetadata in the Trino project (trinodb).
Source: class HiveMetadata, method getColumnHandles.
/**
 * Converts the declared columns of a table into {@link HiveColumnHandle}s, classifying
 * each column as a partition key, a synthesized (hidden) column, or a regular column.
 *
 * @param tableMetadata table definition whose columns are converted (validated first)
 * @param partitionColumnNames names of the columns that act as partition keys
 * @return one handle per declared column, in declaration order
 */
private static List<HiveColumnHandle> getColumnHandles(ConnectorTableMetadata tableMetadata, Set<String> partitionColumnNames) {
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateColumns(tableMetadata);
    ImmutableList.Builder<HiveColumnHandle> handles = ImmutableList.builder();
    int position = 0;
    for (ColumnMetadata column : tableMetadata.getColumns()) {
        // Partition membership wins over the hidden flag when classifying the column.
        HiveColumnHandle.ColumnType kind = partitionColumnNames.contains(column.getName())
                ? PARTITION_KEY
                : (column.isHidden() ? SYNTHESIZED : REGULAR);
        handles.add(createBaseColumn(
                column.getName(),
                position++,
                toHiveType(column.getType()),
                column.getType(),
                kind,
                Optional.ofNullable(column.getComment())));
    }
    return handles.build();
}
Example of using io.trino.spi.connector.ColumnMetadata in the Trino project (trinodb).
Source: class HiveMetadata, method doGetTableMetadata.
/**
 * Loads a table from the metastore and assembles its {@link ConnectorTableMetadata}:
 * column metadata plus the Hive table properties reconstructed from the metastore
 * entry (storage format, partitioning, bucketing, format-specific serde settings, etc.).
 *
 * @throws TableNotFoundException if the table does not exist, or is a Hive/Presto view
 *         while view translation is disabled
 * @throws TrinoException with HIVE_UNSUPPORTED_FORMAT if the table is actually an
 *         Iceberg or Delta Lake table (those are served by their own connectors)
 */
private ConnectorTableMetadata doGetTableMetadata(ConnectorSession session, SchemaTableName tableName) {
Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new TableNotFoundException(tableName));
// Iceberg/Delta tables live in the same metastore but must not be exposed as Hive tables.
if (isIcebergTable(table) || isDeltaLakeTable(table)) {
throw new TrinoException(HIVE_UNSUPPORTED_FORMAT, format("Not a Hive table '%s'", tableName));
}
if (!translateHiveViews && isHiveOrPrestoView(table)) {
throw new TableNotFoundException(tableName);
}
// Column metadata is derived per handle; precision of timestamp columns follows the session setting.
Function<HiveColumnHandle, ColumnMetadata> metadataGetter = columnMetadataGetter(table);
ImmutableList.Builder<ColumnMetadata> columns = ImmutableList.builder();
for (HiveColumnHandle columnHandle : hiveColumnHandles(table, typeManager, getTimestampPrecision(session))) {
columns.add(metadataGetter.apply(columnHandle));
}
// External location property
ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();
if (table.getTableType().equals(EXTERNAL_TABLE.name())) {
properties.put(EXTERNAL_LOCATION_PROPERTY, table.getStorage().getLocation());
}
// Storage format property; unknown formats are tolerated (best effort) and simply omitted.
try {
HiveStorageFormat format = extractHiveStorageFormat(table);
properties.put(STORAGE_FORMAT_PROPERTY, format);
} catch (TrinoException ignored) {
// todo fail if format is not known
}
// Partitioning property
List<String> partitionedBy = table.getPartitionColumns().stream().map(Column::getName).collect(toImmutableList());
if (!partitionedBy.isEmpty()) {
properties.put(PARTITIONED_BY_PROPERTY, partitionedBy);
}
// Bucket properties
table.getStorage().getBucketProperty().ifPresent(property -> {
properties.put(BUCKETING_VERSION, property.getBucketingVersion().getVersion());
properties.put(BUCKET_COUNT_PROPERTY, property.getBucketCount());
properties.put(BUCKETED_BY_PROPERTY, property.getBucketedBy());
properties.put(SORTED_BY_PROPERTY, property.getSortedBy());
});
// Transactional properties (parseBoolean treats null/absent as false)
String transactionalProperty = table.getParameters().get(HiveMetadata.TRANSACTIONAL);
if (parseBoolean(transactionalProperty)) {
properties.put(HiveTableProperties.TRANSACTIONAL, true);
}
// ORC format specific properties
String orcBloomFilterColumns = table.getParameters().get(ORC_BLOOM_FILTER_COLUMNS_KEY);
if (orcBloomFilterColumns != null) {
properties.put(ORC_BLOOM_FILTER_COLUMNS, Splitter.on(',').trimResults().omitEmptyStrings().splitToList(orcBloomFilterColumns));
}
String orcBloomFilterFfp = table.getParameters().get(ORC_BLOOM_FILTER_FPP_KEY);
if (orcBloomFilterFfp != null) {
properties.put(ORC_BLOOM_FILTER_FPP, Double.parseDouble(orcBloomFilterFfp));
}
// Avro specific property
String avroSchemaUrl = table.getParameters().get(AVRO_SCHEMA_URL_KEY);
if (avroSchemaUrl != null) {
properties.put(AVRO_SCHEMA_URL, avroSchemaUrl);
}
// Textfile and CSV specific properties
getSerdeProperty(table, SKIP_HEADER_COUNT_KEY).ifPresent(skipHeaderCount -> properties.put(SKIP_HEADER_LINE_COUNT, Integer.valueOf(skipHeaderCount)));
getSerdeProperty(table, SKIP_FOOTER_COUNT_KEY).ifPresent(skipFooterCount -> properties.put(SKIP_FOOTER_LINE_COUNT, Integer.valueOf(skipFooterCount)));
// Multi-format property
getSerdeProperty(table, NULL_FORMAT_KEY).ifPresent(nullFormat -> properties.put(NULL_FORMAT_PROPERTY, nullFormat));
// Textfile specific properties
getSerdeProperty(table, TEXT_FIELD_SEPARATOR_KEY).ifPresent(fieldSeparator -> properties.put(TEXTFILE_FIELD_SEPARATOR, fieldSeparator));
getSerdeProperty(table, TEXT_FIELD_SEPARATOR_ESCAPE_KEY).ifPresent(fieldEscape -> properties.put(TEXTFILE_FIELD_SEPARATOR_ESCAPE, fieldEscape));
// CSV specific properties
getCsvSerdeProperty(table, CSV_SEPARATOR_KEY).ifPresent(csvSeparator -> properties.put(CSV_SEPARATOR, csvSeparator));
getCsvSerdeProperty(table, CSV_QUOTE_KEY).ifPresent(csvQuote -> properties.put(CSV_QUOTE, csvQuote));
getCsvSerdeProperty(table, CSV_ESCAPE_KEY).ifPresent(csvEscape -> properties.put(CSV_ESCAPE, csvEscape));
// Table comment, if one was stored in the metastore parameters.
Optional<String> comment = Optional.ofNullable(table.getParameters().get(TABLE_COMMENT));
String autoPurgeProperty = table.getParameters().get(AUTO_PURGE_KEY);
if (parseBoolean(autoPurgeProperty)) {
properties.put(AUTO_PURGE, true);
}
return new ConnectorTableMetadata(tableName, columns.build(), properties.buildOrThrow(), comment);
}
Example of using io.trino.spi.connector.ColumnMetadata in the Trino project (trinodb).
Source: class HiveMetadata, method getNewTableLayout.
/**
 * Computes the preferred write layout for a new table. Unbucketed tables get either no
 * layout or one driven purely by the partition columns; bucketed tables get a
 * {@link HivePartitioningHandle} over the bucketing columns followed by the partition columns.
 *
 * @throws TrinoException with NOT_SUPPORTED if the table is bucketed-and-sorted while
 *         sorted writing is disabled for the session
 */
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    validateTimestampColumns(tableMetadata.getColumns(), getTimestampPrecision(session));
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateColumns(tableMetadata);
    Optional<HiveBucketProperty> bucketProperty = getBucketProperty(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    if (bucketProperty.isEmpty()) {
        // No bucketing: the preferred layout (if any) is simply the partition columns.
        return partitionedBy.isEmpty()
                ? Optional.empty()
                : Optional.of(new ConnectorTableLayout(partitionedBy));
    }
    HiveBucketProperty bucketing = bucketProperty.get();
    if (!bucketing.getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new TrinoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    List<String> bucketedBy = bucketing.getBucketedBy();
    // Resolve each bucketing column to its Hive type via the declared column list.
    Map<String, HiveType> columnHiveTypes = tableMetadata.getColumns().stream()
            .collect(toMap(ColumnMetadata::getName, column -> toHiveType(column.getType())));
    List<HiveType> bucketColumnTypes = bucketedBy.stream()
            .map(columnHiveTypes::get)
            .collect(toImmutableList());
    HivePartitioningHandle partitioning = new HivePartitioningHandle(
            bucketing.getBucketingVersion(),
            bucketing.getBucketCount(),
            bucketColumnTypes,
            OptionalInt.of(bucketing.getBucketCount()),
            // Parallel writes only make sense when the table is both partitioned and bucketed.
            !partitionedBy.isEmpty() && isParallelPartitionedBucketedWrites(session));
    List<String> layoutColumns = ImmutableList.<String>builder()
            .addAll(bucketedBy)
            .addAll(partitionedBy)
            .build();
    return Optional.of(new ConnectorTableLayout(partitioning, layoutColumns));
}
Example of using io.trino.spi.connector.ColumnMetadata in the Trino project (trinodb).
Source: class PropertiesSystemTableProvider, method getSystemTable.
/**
 * Exposes a table's metastore parameters as a one-row "$properties" system table:
 * one VARCHAR column per parameter (sorted by key), whose single row holds the values.
 *
 * @return the system table, or empty when the name is not a "$properties" name, or the
 *         underlying table is actually an Iceberg or Delta Lake table
 * @throws TableNotFoundException if the source table does not exist in the metastore
 */
@Override
public Optional<SystemTable> getSystemTable(HiveMetadata metadata, ConnectorSession session, SchemaTableName tableName) {
    if (!PROPERTIES.matches(tableName)) {
        return Optional.empty();
    }
    SchemaTableName sourceTableName = PROPERTIES.getSourceTableName(tableName);
    Table table = metadata.getMetastore()
            .getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    // Iceberg/Delta tables share the metastore but are served by their own connectors.
    if (isDeltaLakeTable(table) || isIcebergTable(table)) {
        return Optional.empty();
    }
    // Sort parameters by key so column order (and the row's value order) is deterministic.
    Map<String, String> sortedParameters = ImmutableSortedMap.copyOf(table.getParameters());
    ImmutableList.Builder<ColumnMetadata> columnBuilder = ImmutableList.builder();
    for (String parameterName : sortedParameters.keySet()) {
        columnBuilder.add(new ColumnMetadata(parameterName, VarcharType.VARCHAR));
    }
    List<ColumnMetadata> columns = columnBuilder.build();
    List<Type> types = columns.stream().map(ColumnMetadata::getType).collect(toImmutableList());
    // Exactly one row: the parameter values, in the same (sorted-key) order as the columns.
    Iterable<List<Object>> propertyValues = ImmutableList.of(ImmutableList.copyOf(sortedParameters.values()));
    return Optional.of(createSystemTable(
            new ConnectorTableMetadata(sourceTableName, columns),
            constraint -> new InMemoryRecordSet(types, propertyValues).cursor()));
}
(End of ColumnMetadata usage examples.)