Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
From the class FileHiveMetastore, method getPartitionNames:
@Override
public synchronized Optional<List<String>> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName)
{
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");

    Optional<Table> tableReference = getTable(metastoreContext, databaseName, tableName);
    if (!tableReference.isPresent()) {
        return Optional.empty();
    }
    Table table = tableReference.get();

    Path tableMetadataDirectory = getTableMetadataDirectory(table);
    List<ArrayDeque<String>> partitions = listPartitions(tableMetadataDirectory, table.getPartitionColumns());
    List<String> partitionNames = partitions.stream()
            .map(partitionValues -> makePartName(table.getPartitionColumns(), ImmutableList.copyOf(partitionValues)))
            .collect(toList());
    return Optional.of(ImmutableList.copyOf(partitionNames));
}
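A minimal caller sketch for this method. The metastore and session variables, and the "web"/"page_views" schema and table names, are assumptions for illustration; the MetastoreContext is constructed exactly as in the Iceberg examples below.

// Sketch only: "metastore" (a FileHiveMetastore) and "session" (a ConnectorSession)
// are assumed to be in scope; the schema and table names are hypothetical.
MetastoreContext metastoreContext = new MetastoreContext(
        session.getIdentity(),
        session.getQueryId(),
        session.getClientInfo(),
        session.getSource(),
        Optional.empty(),
        false,
        HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
List<String> partitionNames = metastore
        .getPartitionNames(metastoreContext, "web", "page_views")
        .orElse(ImmutableList.of());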
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
From the class IcebergHiveMetadata, method getRawSystemTable:
private Optional<SystemTable> getRawSystemTable(ConnectorSession session, SchemaTableName tableName)
{
    IcebergTableName name = IcebergTableName.from(tableName.getTableName());
    MetastoreContext metastoreContext = new MetastoreContext(
            session.getIdentity(),
            session.getQueryId(),
            session.getClientInfo(),
            session.getSource(),
            Optional.empty(),
            false,
            HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    Optional<Table> hiveTable = metastore.getTable(metastoreContext, tableName.getSchemaName(), name.getTableName());
    if (!hiveTable.isPresent() || !isIcebergTable(hiveTable.get())) {
        return Optional.empty();
    }
    org.apache.iceberg.Table table = getHiveIcebergTable(metastore, hdfsEnvironment, session, new SchemaTableName(tableName.getSchemaName(), name.getTableName()));
    return getIcebergSystemTable(tableName, table);
}
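The two-part guard (the table exists and is an Iceberg table) can equivalently be folded into the Optional chain. A sketch of the same behavior, not the committed code, reusing the statically imported isIcebergTable predicate from the snippet above:

// Equivalent guard (sketch): filter the Optional with the Iceberg predicate
// instead of testing presence and table type separately.
Optional<Table> hiveTable = metastore
        .getTable(metastoreContext, tableName.getSchemaName(), name.getTableName())
        .filter(table -> isIcebergTable(table));
if (!hiveTable.isPresent()) {
    return Optional.empty();
}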
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
From the class IcebergHiveMetadata, method beginCreateTable:
@Override
public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();

    Schema schema = toIcebergSchema(tableMetadata.getColumns());
    PartitionSpec partitionSpec = parsePartitionFields(schema, getPartitioning(tableMetadata.getProperties()));

    MetastoreContext metastoreContext = new MetastoreContext(
            session.getIdentity(),
            session.getQueryId(),
            session.getClientInfo(),
            session.getSource(),
            Optional.empty(),
            false,
            HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    Database database = metastore.getDatabase(metastoreContext, schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName));

    HdfsContext hdfsContext = new HdfsContext(session, schemaName, tableName);
    String targetPath = getTableLocation(tableMetadata.getProperties());
    if (targetPath == null) {
        Optional<String> location = database.getLocation();
        if (!location.isPresent() || location.get().isEmpty()) {
            throw new PrestoException(NOT_SUPPORTED, "Database " + schemaName + " location is not set");
        }
        Path databasePath = new Path(location.get());
        Path resultPath = new Path(databasePath, tableName);
        targetPath = resultPath.toString();
    }

    TableOperations operations = new HiveTableOperations(
            metastore,
            new MetastoreContext(
                    session.getIdentity(),
                    session.getQueryId(),
                    session.getClientInfo(),
                    session.getSource(),
                    Optional.empty(),
                    false,
                    HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER),
            hdfsEnvironment,
            hdfsContext,
            schemaName,
            tableName,
            session.getUser(),
            targetPath);
    if (operations.current() != null) {
        throw new TableAlreadyExistsException(schemaTableName);
    }

    ImmutableMap.Builder<String, String> propertiesBuilder = ImmutableMap.builderWithExpectedSize(2);
    FileFormat fileFormat = getFileFormat(tableMetadata.getProperties());
    propertiesBuilder.put(DEFAULT_FILE_FORMAT, fileFormat.toString());
    if (tableMetadata.getComment().isPresent()) {
        propertiesBuilder.put(TABLE_COMMENT, tableMetadata.getComment().get());
    }

    TableMetadata metadata = newTableMetadata(schema, partitionSpec, targetPath, propertiesBuilder.build());
    transaction = createTableTransaction(tableName, operations, metadata);

    return new IcebergWritableTableHandle(
            schemaName,
            tableName,
            SchemaParser.toJson(metadata.schema()),
            PartitionSpecParser.toJson(metadata.spec()),
            getColumns(metadata.schema(), typeManager),
            targetPath,
            fileFormat,
            metadata.properties());
}
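The same seven-argument MetastoreContext construction appears twice in this method and once in every other example on this page. A hypothetical helper (the name metastoreContextFor is an assumption, not part of the Presto codebase) would remove the duplication:

// Hypothetical helper (sketch): builds the MetastoreContext from a ConnectorSession
// exactly as the snippets on this page do.
private static MetastoreContext metastoreContextFor(ConnectorSession session)
{
    return new MetastoreContext(
            session.getIdentity(),
            session.getQueryId(),
            session.getClientInfo(),
            session.getSource(),
            Optional.empty(),
            false,
            HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
}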
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
From the class IcebergHiveMetadata, method dropTable:
@Override
public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
    // TODO: support path override in Iceberg table creation
    org.apache.iceberg.Table table = getHiveIcebergTable(metastore, hdfsEnvironment, session, handle.getSchemaTableName());
    if (table.properties().containsKey(OBJECT_STORE_PATH) ||
            table.properties().containsKey(WRITE_NEW_DATA_LOCATION) ||
            table.properties().containsKey(WRITE_METADATA_LOCATION)) {
        throw new PrestoException(NOT_SUPPORTED, "Table " + handle.getSchemaTableName() + " contains Iceberg path override properties and cannot be dropped from Presto");
    }
    MetastoreContext metastoreContext = new MetastoreContext(
            session.getIdentity(),
            session.getQueryId(),
            session.getClientInfo(),
            session.getSource(),
            Optional.empty(),
            false,
            HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    metastore.dropTable(metastoreContext, handle.getSchemaName(), handle.getTableName(), true);
}
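The three path-override property keys checked before the drop could be grouped into one set. A hypothetical refactoring sketch, assuming the same statically imported constants plus java.util.Set and Guava's ImmutableSet:

// Sketch only: groups the override keys from the snippet above into a single set.
private static final Set<String> PATH_OVERRIDE_PROPERTIES =
        ImmutableSet.of(OBJECT_STORE_PATH, WRITE_NEW_DATA_LOCATION, WRITE_METADATA_LOCATION);

private static boolean hasPathOverride(org.apache.iceberg.Table table)
{
    return PATH_OVERRIDE_PROPERTIES.stream().anyMatch(table.properties()::containsKey);
}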
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
From the class IcebergHiveMetadata, method getTableMetadata:
@Override
protected ConnectorTableMetadata getTableMetadata(ConnectorSession session, SchemaTableName table)
{
    MetastoreContext metastoreContext = new MetastoreContext(
            session.getIdentity(),
            session.getQueryId(),
            session.getClientInfo(),
            session.getSource(),
            Optional.empty(),
            false,
            HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    if (!metastore.getTable(metastoreContext, table.getSchemaName(), table.getTableName()).isPresent()) {
        throw new TableNotFoundException(table);
    }
    org.apache.iceberg.Table icebergTable = getHiveIcebergTable(metastore, hdfsEnvironment, session, table);
    List<ColumnMetadata> columns = getColumnMetadatas(icebergTable);
    return new ConnectorTableMetadata(table, columns, createMetadataProperties(icebergTable), getTableComment(icebergTable));
}
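The existence check discards the fetched Table value. A slightly tighter equivalent (a sketch, not the committed code) fails fast with orElseThrow:

// Equivalent check (sketch): throw immediately if the table is absent.
metastore.getTable(metastoreContext, table.getSchemaName(), table.getTableName())
        .orElseThrow(() -> new TableNotFoundException(table));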