Use of com.facebook.presto.hive.LocationService.WriteInfo in the prestodb/presto project.
The example below is the beginCreateTable method of the HiveMetadata class.
@Override
public HiveOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
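    // fail fast if the JVM time zone is not configured as Hive expects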
    verifyJvmTimeZone();

    if (getExternalLocation(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "External tables cannot be created using CREATE TABLE AS");
    }
    if (getAvroSchemaUrl(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "CREATE TABLE AS not supported when Avro schema url is set");
    }
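    // read the CREATE TABLE properties: storage format, partitioning, bucketing, and sort order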
    HiveStorageFormat tableStorageFormat = getHiveStorageFormat(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    Optional<HiveBucketProperty> bucketProperty = getBucketProperty(tableMetadata.getProperties());
    List<SortingColumn> preferredOrderingColumns = getPreferredOrderingColumns(tableMetadata.getProperties());
    // resolve the schema and table name for the new table
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Optional<TableEncryptionProperties> tableEncryptionProperties = getTableEncryptionPropertiesFromTableProperties(tableMetadata, tableStorageFormat, partitionedBy);
    List<HiveColumnHandle> columnHandles = getColumnHandles(tableMetadata, ImmutableSet.copyOf(partitionedBy), typeTranslator);
    HiveStorageFormat partitionStorageFormat = isRespectTableFormat(session) ? tableStorageFormat : getHiveStorageFormat(session);

    // unpartitioned tables ignore the partition storage format
    HiveStorageFormat actualStorageFormat = partitionedBy.isEmpty() ? tableStorageFormat : partitionStorageFormat;
    validateColumns(actualStorageFormat, columnHandles);
    if (tableEncryptionProperties.isPresent() && tableStorageFormat != actualStorageFormat) {
        throw new PrestoException(
                INVALID_TABLE_PROPERTY,
                format(
                        "For encrypted tables, partition format (%s) should match table format (%s). Using the session property %s or appropriately setting %s can help with ensuring this",
                        actualStorageFormat.name(),
                        tableStorageFormat.name(),
                        RESPECT_TABLE_FORMAT,
                        HIVE_STORAGE_FORMAT));
    }
    MetastoreContext metastoreContext = getMetastoreContext(session);
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(columnHandle -> columnHandleToColumn(metastoreContext, columnHandle))
            .collect(toList());
    checkPartitionTypesSupported(partitionColumns);
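    // pick the target (and, if needed, temporary) write location for the new table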
    LocationHandle locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, isTempPathRequired(session, bucketProperty, preferredOrderingColumns));
    HdfsContext context = new HdfsContext(session, schemaName, tableName, locationHandle.getTargetPath().toString(), true);
    Map<String, String> tableProperties = getEmptyTableProperties(tableMetadata, context, tableStorageFormat, tableEncryptionProperties);

    HiveOutputTableHandle result = new HiveOutputTableHandle(
            schemaName,
            tableName,
            columnHandles,
            metastore.generatePageSinkMetadata(metastoreContext, schemaTableName),
            locationHandle,
            tableStorageFormat,
            partitionStorageFormat,
            actualStorageFormat,
            getHiveCompressionCodec(session, false, actualStorageFormat),
            partitionedBy,
            bucketProperty,
            preferredOrderingColumns,
            session.getUser(),
            tableProperties,
            encryptionInformationProvider.getWriteEncryptionInformation(session, tableEncryptionProperties, schemaName, tableName));
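    // WriteInfo exposes the resolved write mode, write path, and temp path for this query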
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(context, metastoreContext, writeInfo.getWriteMode(), writeInfo.getWritePath(), writeInfo.getTempPath(), schemaTableName, false);

    return result;
}
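As the snippet shows, WriteInfo is obtained from LocationService#getQueryWriteInfo and bundles the write mode together with the write and temp paths that are then passed to declareIntentionToWrite. Below is a minimal sketch, not from the Presto source, of a hypothetical helper that consumes a WriteInfo the same way; the accessors getWriteMode(), getWritePath(), and getTempPath() are the ones used in beginCreateTable above, while the class and method names are illustrative.

import com.facebook.presto.hive.LocationHandle;
import com.facebook.presto.hive.LocationService;
import com.facebook.presto.hive.LocationService.WriteInfo;

public final class WriteInfoSketch
{
    private WriteInfoSketch() {}

    // Hypothetical helper: summarize where a query will stage and land its output.
    // Uses only the WriteInfo accessors seen in beginCreateTable above.
    public static String describeWrite(LocationService locationService, LocationHandle locationHandle)
    {
        WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
        return String.format(
                "writeMode=%s writePath=%s tempPath=%s",
                writeInfo.getWriteMode(),
                writeInfo.getWritePath(),
                writeInfo.getTempPath());
    }
}

Declaring the intention to write before returning the output handle records the planned write with the semi-transactional metastore, so staged files can be cleaned up if the query is rolled back.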