Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb.
In class IcebergHiveMetadata, method beginCreateTable.
@Override
public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Schema schema = toIcebergSchema(tableMetadata.getColumns());
    PartitionSpec partitionSpec = parsePartitionFields(schema, getPartitioning(tableMetadata.getProperties()));
    MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), Optional.empty(), false, HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    Database database = metastore.getDatabase(metastoreContext, schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName));

    HdfsContext hdfsContext = new HdfsContext(session, schemaName, tableName);

    // Resolve the table location: use the explicit table property if given,
    // otherwise place the table under the database location.
    String targetPath = getTableLocation(tableMetadata.getProperties());
    if (targetPath == null) {
        Optional<String> location = database.getLocation();
        if (!location.isPresent() || location.get().isEmpty()) {
            throw new PrestoException(NOT_SUPPORTED, "Database " + schemaName + " location is not set");
        }
        Path databasePath = new Path(location.get());
        Path resultPath = new Path(databasePath, tableName);
        targetPath = resultPath.toString();
    }

    TableOperations operations = new HiveTableOperations(
            metastore,
            new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), Optional.empty(), false, HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER),
            hdfsEnvironment,
            hdfsContext,
            schemaName,
            tableName,
            session.getUser(),
            targetPath);
    if (operations.current() != null) {
        throw new TableAlreadyExistsException(schemaTableName);
    }

    ImmutableMap.Builder<String, String> propertiesBuilder = ImmutableMap.builderWithExpectedSize(2);
    FileFormat fileFormat = getFileFormat(tableMetadata.getProperties());
    propertiesBuilder.put(DEFAULT_FILE_FORMAT, fileFormat.toString());
    if (tableMetadata.getComment().isPresent()) {
        propertiesBuilder.put(TABLE_COMMENT, tableMetadata.getComment().get());
    }

    TableMetadata metadata = newTableMetadata(schema, partitionSpec, targetPath, propertiesBuilder.build());
    transaction = createTableTransaction(tableName, operations, metadata);
    return new IcebergWritableTableHandle(
            schemaName,
            tableName,
            SchemaParser.toJson(metadata.schema()),
            PartitionSpecParser.toJson(metadata.spec()),
            getColumns(metadata.schema(), typeManager),
            targetPath,
            fileFormat,
            metadata.properties());
}
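The HdfsContext created above is what later filesystem work for the new table is performed against. As a rough illustration of how such a context is typically consumed, the sketch below resolves a FileSystem for the target path through HdfsEnvironment and checks that the location is empty before a table is created there. The getFileSystem(HdfsContext, Path) call and the helper class itself are assumptions for illustration, not code from IcebergHiveMetadata; verify the method against the Presto version in use.

import java.io.IOException;
import java.io.UncheckedIOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.facebook.presto.hive.HdfsContext;
import com.facebook.presto.hive.HdfsEnvironment;

// Hypothetical helper: verify that a prospective table location is unused.
// Assumes HdfsEnvironment exposes getFileSystem(HdfsContext, Path).
public final class TargetPathChecks
{
    private TargetPathChecks() {}

    public static void checkLocationIsEmpty(HdfsEnvironment hdfsEnvironment, HdfsContext context, String targetPath)
    {
        Path path = new Path(targetPath);
        try {
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, path);
            if (fileSystem.exists(path) && fileSystem.listStatus(path).length > 0) {
                throw new IllegalStateException("Target table location is not empty: " + targetPath);
            }
        }
        catch (IOException e) {
            throw new UncheckedIOException("Failed to inspect table location " + targetPath, e);
        }
    }
}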
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb.
In class SemiTransactionalHiveMetastore, method dropPartition.
public synchronized void dropPartition(ConnectorSession session, String databaseName, String tableName, String tablePath, List<String> partitionValues)
{
    setShared();
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions.computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    Action<PartitionAndMore> oldPartitionAction = partitionActionsOfTable.get(partitionValues);
    if (oldPartitionAction == null) {
        // No action recorded for this partition yet: remember the DROP, along with the
        // HdfsContext needed to delete the partition data when the transaction commits.
        HdfsContext context = new HdfsContext(session, databaseName, tableName, tablePath, false);
        partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP, null, context));
        return;
    }
    switch (oldPartitionAction.getType()) {
        case DROP:
            throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), partitionValues);
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
            throw new PrestoException(NOT_SUPPORTED, format("dropping a partition added in the same transaction is not supported: %s %s %s", databaseName, tableName, partitionValues));
        default:
            throw new IllegalStateException("Unknown action type");
    }
}
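dropPartition does not touch HDFS immediately; it only records a DROP action keyed by the partition values, together with the HdfsContext that will authorize the deletion at commit time. Below is a minimal, self-contained sketch of that bookkeeping pattern using hypothetical simplified types; it is not Presto's actual Action or PartitionAndMore machinery.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.facebook.presto.hive.HdfsContext;

// Hypothetical, simplified sketch of the deferred-action pattern: a DROP is only
// recorded here, and the captured HdfsContext is used to delete the partition data
// when the transaction commits.
public class PartitionActionLog
{
    public enum ActionType { ADD, DROP, ALTER, INSERT_EXISTING }

    private static final class Action
    {
        private final ActionType type;
        private final HdfsContext context;

        private Action(ActionType type, HdfsContext context)
        {
            this.type = type;
            this.context = context;
        }
    }

    private final Map<List<String>, Action> actions = new HashMap<>();

    public void recordDrop(List<String> partitionValues, HdfsContext context)
    {
        Action existing = actions.get(partitionValues);
        if (existing == null) {
            actions.put(partitionValues, new Action(ActionType.DROP, context));
            return;
        }
        switch (existing.type) {
            case DROP:
                // Already scheduled for drop in this transaction.
                throw new IllegalStateException("Partition already dropped in this transaction");
            case ADD:
            case ALTER:
            case INSERT_EXISTING:
                throw new UnsupportedOperationException("Dropping a partition modified in the same transaction is not supported");
            default:
                throw new IllegalStateException("Unknown action type");
        }
    }
}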
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb.
In class SemiTransactionalHiveMetastore, method createTable.
/**
 * {@code currentPath} needs to be supplied if a write path exists for the table.
 */
public synchronized void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<Path> currentPath, boolean ignoreExisting, PartitionStatistics statistics)
{
    setShared();
    // When creating a table, it should never have partition actions. This is just a sanity check.
    checkNoPartitionAction(table.getDatabaseName(), table.getTableName());
    Action<TableAndMore> oldTableAction = tableActions.get(table.getSchemaTableName());
    TableAndMore tableAndMore = new TableAndMore(table, Optional.of(principalPrivileges), currentPath, Optional.empty(), ignoreExisting, statistics, statistics);
    if (oldTableAction == null) {
        // First action for this table in the transaction: record an ADD, with an HdfsContext
        // that marks the table location as new (isNewTable = true).
        HdfsContext context = new HdfsContext(session, table.getDatabaseName(), table.getTableName(), table.getStorage().getLocation(), true);
        tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ADD, tableAndMore, context));
        return;
    }
    switch (oldTableAction.getType()) {
        case DROP:
            throw new PrestoException(TRANSACTION_CONFLICT, "Dropping and then recreating the same table in a transaction is not supported");
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
            throw new TableAlreadyExistsException(table.getSchemaTableName());
        default:
            throw new IllegalStateException("Unknown action type");
    }
}
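From the connector side, this method is what turns a CREATE TABLE into a staged, transactional action; the HdfsContext is built inside it. A minimal caller-side sketch follows. The Table and PrincipalPrivileges are assumed to be built elsewhere, and PartitionStatistics.empty() is assumed to be available for a table created without data; check these names against the Presto version in use.

import java.util.Optional;

import org.apache.hadoop.fs.Path;

import com.facebook.presto.hive.metastore.PartitionStatistics;
import com.facebook.presto.hive.metastore.PrincipalPrivileges;
import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore;
import com.facebook.presto.hive.metastore.Table;
import com.facebook.presto.spi.ConnectorSession;

// Hypothetical helper showing how a connector might stage a table creation.
// The HdfsContext is created inside createTable (with isNewTable = true), so the
// caller only supplies the session, the table, and the optional staging path.
public final class CreateTableExample
{
    private CreateTableExample() {}

    public static void stageCreateTable(
            SemiTransactionalHiveMetastore metastore,
            ConnectorSession session,
            Table table,
            PrincipalPrivileges privileges,
            Optional<Path> writePath)  // present only if data was written to a staging path
    {
        // PartitionStatistics.empty() is assumed here for a table created without data.
        metastore.createTable(session, table, privileges, writePath, false, PartitionStatistics.empty());
    }
}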
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb.
In class SemiTransactionalHiveMetastore, method finishInsertIntoExistingPartition.
public synchronized void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, String tablePath, List<String> partitionValues, Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
{
    setShared();
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions.computeIfAbsent(schemaTableName, k -> new HashMap<>());
    Action<PartitionAndMore> oldPartitionAction = partitionActionsOfTable.get(partitionValues);
    MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), getMetastoreHeaders(session), isUserDefinedTypeEncodingEnabled(session), columnConverterProvider);
    if (oldPartitionAction == null) {
        Partition partition = delegate.getPartition(metastoreContext, databaseName, tableName, partitionValues)
                .orElseThrow(() -> new PartitionNotFoundException(schemaTableName, partitionValues));
        String partitionName = getPartitionName(metastoreContext, databaseName, tableName, partitionValues);
        PartitionStatistics currentStatistics = delegate.getPartitionStatistics(metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName);
        if (currentStatistics == null) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "currentStatistics is null");
        }
        // Record the INSERT_EXISTING action: the written file names and the merged statistics
        // are kept alongside the HdfsContext so they can be applied at commit time.
        HdfsContext context = new HdfsContext(session, databaseName, tableName, tablePath, false);
        partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.INSERT_EXISTING, new PartitionAndMore(partition, currentLocation, Optional.of(fileNames), merge(currentStatistics, statisticsUpdate), statisticsUpdate), context));
        return;
    }
    switch (oldPartitionAction.getType()) {
        case DROP:
            throw new PartitionNotFoundException(schemaTableName, partitionValues);
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
            throw new UnsupportedOperationException("Inserting into a partition that was added, altered, or inserted into in the same transaction is not supported");
        default:
            throw new IllegalStateException("Unknown action type");
    }
}
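The merge(currentStatistics, statisticsUpdate) call combines the partition's existing statistics with those of the newly written files before the INSERT_EXISTING action is recorded. As a rough conceptual illustration only (using a hypothetical simplified type, not Presto's PartitionStatistics or its merge helper), the additive basic statistics behave roughly like this:

// Hypothetical simplified statistics type, for illustration only. Presto's real
// PartitionStatistics also carries column-level statistics; the point here is just
// that an insert into an existing partition adds the new files' basic counts to
// the partition's current totals rather than replacing them.
public final class BasicStats
{
    private final long rowCount;
    private final long fileCount;
    private final long totalSizeInBytes;

    public BasicStats(long rowCount, long fileCount, long totalSizeInBytes)
    {
        this.rowCount = rowCount;
        this.fileCount = fileCount;
        this.totalSizeInBytes = totalSizeInBytes;
    }

    // Conceptual counterpart of merge(currentStatistics, statisticsUpdate).
    public static BasicStats merge(BasicStats current, BasicStats update)
    {
        return new BasicStats(
                current.rowCount + update.rowCount,
                current.fileCount + update.fileCount,
                current.totalSizeInBytes + update.totalSizeInBytes);
    }
}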
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb.
In class SemiTransactionalHiveMetastore, method truncateUnpartitionedTable.
public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
{
    checkReadable();
    Optional<Table> table = getTable(new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), getMetastoreHeaders(session), isUserDefinedTypeEncodingEnabled(session), columnConverterProvider), databaseName, tableName);
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    if (!table.isPresent()) {
        throw new TableNotFoundException(schemaTableName);
    }
    if (!table.get().getTableType().equals(MANAGED_TABLE) && !table.get().getTableType().equals(MATERIALIZED_VIEW)) {
        throw new PrestoException(NOT_SUPPORTED, "Cannot delete from non-managed Hive table");
    }
    if (!table.get().getPartitionColumns().isEmpty()) {
        throw new IllegalArgumentException("Table is partitioned");
    }

    Path path = new Path(table.get().getStorage().getLocation());
    HdfsContext context = new HdfsContext(session, databaseName, tableName, table.get().getStorage().getLocation(), false);
    // Truncation bypasses the per-table and per-partition action maps and runs as an
    // exclusive operation that deletes the table's files directly.
    setExclusive((delegate, hdfsEnvironment) -> {
        RecursiveDeleteResult recursiveDeleteResult = recursiveDeleteFiles(hdfsEnvironment, context, path, ImmutableSet.of(""), false);
        if (!recursiveDeleteResult.getNotDeletedEligibleItems().isEmpty()) {
            throw new PrestoException(HIVE_FILESYSTEM_ERROR, format("Error deleting from unpartitioned table %s. These items can not be deleted: %s", schemaTableName, recursiveDeleteResult.getNotDeletedEligibleItems()));
        }
    });
}
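recursiveDeleteFiles is what finally removes the table's files when the exclusive operation runs. A simplified sketch of that kind of traversal is shown below: it resolves the FileSystem from the HdfsContext and deletes every file under the location while keeping the directory tree, which matches truncate semantics. The getFileSystem(HdfsContext, Path) call and the helper are assumptions for illustration; the real recursiveDeleteFiles additionally supports file-name prefixes and optional deletion of empty directories.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.facebook.presto.hive.HdfsContext;
import com.facebook.presto.hive.HdfsEnvironment;

// Hypothetical, simplified version of a recursive delete used for TRUNCATE-like
// semantics: remove the files under a location but keep the directory structure.
// Assumes HdfsEnvironment.getFileSystem(HdfsContext, Path); paths that could not be
// deleted are collected and returned, mirroring the "not deleted eligible items" idea.
public final class TruncateSketch
{
    private TruncateSketch() {}

    public static List<Path> deleteFilesUnder(HdfsEnvironment hdfsEnvironment, HdfsContext context, Path location)
            throws IOException
    {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
        List<Path> notDeleted = new ArrayList<>();
        deleteRecursively(fileSystem, location, notDeleted);
        return notDeleted;
    }

    private static void deleteRecursively(FileSystem fileSystem, Path directory, List<Path> notDeleted)
            throws IOException
    {
        for (FileStatus status : fileSystem.listStatus(directory)) {
            if (status.isDirectory()) {
                deleteRecursively(fileSystem, status.getPath(), notDeleted);
            }
            else if (!fileSystem.delete(status.getPath(), false)) {
                notDeleted.add(status.getPath());
            }
        }
    }
}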