Use of io.trino.plugin.hive.metastore.Database in project trino by trinodb.
From the class TestMetadataQueryOptimization, method createLocalQueryRunner:
@Override
protected LocalQueryRunner createLocalQueryRunner()
{
    Session session = testSessionBuilder()
            .setCatalog(ICEBERG_CATALOG)
            .setSchema(SCHEMA_NAME)
            .build();
    baseDir = Files.createTempDir();
    HiveMetastore metastore = createTestingFileHiveMetastore(baseDir);
    LocalQueryRunner queryRunner = LocalQueryRunner.create(session);
    queryRunner.createCatalog(
            ICEBERG_CATALOG,
            new TestingIcebergConnectorFactory(Optional.of(metastore), Optional.empty(), EMPTY_MODULE),
            ImmutableMap.of());
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    return queryRunner;
}
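For orientation, the runner built here can be exercised directly. A minimal usage sketch follows; it is not part of the Trino test, and the catalog name "iceberg" (the assumed value of ICEBERG_CATALOG) is hypothetical:

// Hypothetical usage sketch, not from the Trino sources.
LocalQueryRunner queryRunner = createLocalQueryRunner();
// The schema registered through the file metastore above is now visible to the engine.
queryRunner.execute("SHOW SCHEMAS FROM iceberg");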
Use of io.trino.plugin.hive.metastore.Database in project trino by trinodb.
From the class BridgingHiveMetastore, method setDatabaseOwner:
@Override
public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal)
{
    Database database = fromMetastoreApiDatabase(delegate.getDatabase(databaseName)
            .orElseThrow(() -> new SchemaNotFoundException(databaseName)));
    Database newDatabase = Database.builder(database)
            .setOwnerName(Optional.of(principal.getName()))
            .setOwnerType(Optional.of(principal.getType()))
            .build();
    delegate.alterDatabase(identity, databaseName, toMetastoreApiDatabase(newDatabase));
}
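Note the copy-builder idiom: Database.builder(database) starts from an existing immutable Database and overrides only the owner fields before rebuilding. A hedged sketch of a call site, where the identity construction, schema name, and principal are illustrative assumptions:

// Hypothetical call site; schema name and principal are illustrative.
HiveIdentity identity = new HiveIdentity(session);
metastore.setDatabaseOwner(identity, "sales", new HivePrincipal(PrincipalType.USER, "alice"));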
Use of io.trino.plugin.hive.metastore.Database in project trino by trinodb.
From the class DeltaLakeMetadata, method createTable:
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Database schema = metastore.getDatabase(schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName));
    boolean external = true;
    String location = getLocation(tableMetadata.getProperties());
    if (location == null) {
        // No explicit 'location' table property: derive the path from the schema
        // location and treat the table as managed.
        Optional<String> schemaLocation = getSchemaLocation(schema);
        if (schemaLocation.isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, "The 'location' property must be specified either for the table or the schema");
        }
        location = new Path(schemaLocation.get(), tableName).toString();
        checkPathContainsNoFiles(session, new Path(location));
        external = false;
    }
    Path targetPath = new Path(location);
    ensurePathExists(session, targetPath);
    Path deltaLogDirectory = getTransactionLogDir(targetPath);
    Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsContext(session), targetPath);
        // Write the initial transaction log entries only when no _delta_log exists yet;
        // if one already exists, the statement registers an existing Delta table.
        if (!fileSystem.exists(deltaLogDirectory)) {
            validateTableColumns(tableMetadata);
            List<String> partitionColumns = getPartitionedBy(tableMetadata.getProperties());
            List<DeltaLakeColumnHandle> deltaLakeColumns = tableMetadata.getColumns().stream()
                    .map(column -> toColumnHandle(column, partitionColumns))
                    .collect(toImmutableList());
            TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriterWithoutTransactionIsolation(session, targetPath.toString());
            appendInitialTableEntries(
                    transactionLogWriter,
                    deltaLakeColumns,
                    partitionColumns,
                    buildDeltaMetadataConfiguration(checkpointInterval),
                    CREATE_TABLE_OPERATION,
                    session,
                    nodeVersion,
                    nodeId);
            setRollback(() -> deleteRecursivelyIfExists(new HdfsContext(session), hdfsEnvironment, deltaLogDirectory));
            transactionLogWriter.flush();
        }
    }
    catch (IOException e) {
        throw new TrinoException(DELTA_LAKE_BAD_WRITE, "Unable to access file system for: " + location, e);
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(schemaName)
            .setTableName(tableName)
            .setOwner(Optional.of(session.getUser()))
            .setTableType(external ? EXTERNAL_TABLE.name() : MANAGED_TABLE.name())
            .setDataColumns(DUMMY_DATA_COLUMNS)
            .setParameters(deltaTableProperties(session, location, external));
    setDeltaStorageFormat(tableBuilder, location, targetPath);
    Table table = tableBuilder.build();
    PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow());
    metastore.createTable(session, table, principalPrivileges);
}
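To make the managed/external split concrete, here is a hedged SQL-level illustration, assuming a QueryRunner wired to a Delta Lake catalog named delta (all table names and paths are hypothetical):

// Hypothetical illustration, not from the Trino sources.
// External table: explicit 'location' property, so external stays true.
queryRunner.execute("CREATE TABLE delta.sales.events (id BIGINT) WITH (location = 's3://bucket/events')");
// Managed table: the path is derived from the schema location and must contain no files.
queryRunner.execute("CREATE TABLE delta.sales.orders (id BIGINT)");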
Use of io.trino.plugin.hive.metastore.Database in project trino by trinodb.
From the class DeltaLakeMetadata, method beginCreateTable:
@Override
public DeltaLakeOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorTableLayout> layout)
{
    validateTableColumns(tableMetadata);
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Database schema = metastore.getDatabase(schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName));
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    boolean external = true;
    String location = getLocation(tableMetadata.getProperties());
    if (location == null) {
        Optional<String> schemaLocation = getSchemaLocation(schema);
        if (schemaLocation.isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, "The 'location' property must be specified either for the table or the schema");
        }
        location = new Path(schemaLocation.get(), tableName).toString();
        external = false;
    }
    Path targetPath = new Path(location);
    ensurePathExists(session, targetPath);
    HdfsContext hdfsContext = new HdfsContext(session);
    createDirectory(hdfsContext, hdfsEnvironment, targetPath);
    checkPathContainsNoFiles(session, targetPath);
    setRollback(() -> deleteRecursivelyIfExists(new HdfsContext(session), hdfsEnvironment, targetPath));
    return new DeltaLakeOutputTableHandle(
            schemaName,
            tableName,
            tableMetadata.getColumns().stream()
                    .map(column -> toColumnHandle(column, partitionedBy))
                    .collect(toImmutableList()),
            location,
            DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties()),
            external);
}
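Unlike createTable above, beginCreateTable backs CREATE TABLE AS: it creates the target directory up front, verifies it contains no files, and leaves writing the transaction log to the finish step. A hedged trigger, using the same hypothetical catalog as before:

// Hypothetical CTAS statement that would route through beginCreateTable.
queryRunner.execute("CREATE TABLE delta.sales.recent_orders AS SELECT * FROM delta.sales.orders");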
Use of io.trino.plugin.hive.metastore.Database in project trino by trinodb.
From the class DeltaLakeMetadata, method createSchema:
@Override
public void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties, TrinoPrincipal owner)
{
    Optional<String> location = DeltaLakeSchemaProperties.getLocation(properties).map(locationUri -> {
        try {
            // Resolving the file system eagerly validates the supplied location URI.
            hdfsEnvironment.getFileSystem(new HdfsContext(session), new Path(locationUri));
        }
        catch (IOException e) {
            throw new TrinoException(INVALID_SCHEMA_PROPERTY, "Invalid location URI: " + locationUri, e);
        }
        return locationUri;
    });
    Database database = Database.builder()
            .setDatabaseName(schemaName)
            .setLocation(location)
            .setOwnerType(Optional.of(owner.getType()))
            .setOwnerName(Optional.of(owner.getName()))
            .build();
    metastore.createDatabase(database);
}
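A hedged sketch of the SQL that reaches this method, again with hypothetical names; the 'location' property is what DeltaLakeSchemaProperties.getLocation reads and the eager getFileSystem call validates:

// Hypothetical schema creation; the location URI is illustrative.
queryRunner.execute("CREATE SCHEMA delta.sales WITH (location = 's3://bucket/sales')");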