Example use of io.trino.spi.connector.SchemaNotFoundException in the trinodb/trino project: class SemiTransactionalHiveMetastore, method dropDatabase.
/**
 * Drops the given schema via the metastore delegate.
 *
 * The schema's warehouse directory is removed only when it is verified to be
 * empty. If the emptiness check cannot be performed, the configured
 * {@code deleteSchemaLocationsFallback} decides whether data is deleted.
 *
 * @throws SchemaNotFoundException if the schema does not exist
 */
public synchronized void dropDatabase(ConnectorSession session, String schemaName)
{
    // Resolve the schema location eagerly so a missing schema fails fast,
    // before the exclusive operation is scheduled.
    Optional<Path> location = delegate.getDatabase(schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName))
            .getLocation()
            .map(Path::new);

    setExclusive((delegate, hdfsEnvironment) -> {
        boolean deleteData;
        if (location.isPresent()) {
            Path path = location.get();
            HdfsContext context = new HdfsContext(session);
            try (FileSystem fs = hdfsEnvironment.getFileSystem(context, path)) {
                // Request deletion only when the directory holds no files at all.
                deleteData = !fs.listLocatedStatus(path).hasNext();
            }
            catch (IOException | RuntimeException e) {
                // Could not inspect the directory; defer to the configured fallback.
                log.warn(e, "Could not check schema directory '%s'", path);
                deleteData = deleteSchemaLocationsFallback;
            }
        }
        else {
            // Schema has no location at all; fallback decides.
            deleteData = deleteSchemaLocationsFallback;
        }
        delegate.dropDatabase(schemaName, deleteData);
    });
}
Example use of io.trino.spi.connector.SchemaNotFoundException in the trinodb/trino project: class GlueHiveMetastore, method renameDatabase.
/**
 * Renames a database in AWS Glue by copying its definition under the new name.
 *
 * @throws SchemaNotFoundException if the source database does not exist
 * @throws TrinoException wrapping any AWS service failure
 */
@Override
public void renameDatabase(String databaseName, String newDatabaseName)
{
    try {
        Database existing = getDatabase(databaseName)
                .orElseThrow(() -> new SchemaNotFoundException(databaseName));
        // Glue has no rename primitive: update the existing entry with a new name.
        DatabaseInput renamed = GlueInputConverter.convertDatabase(existing).withName(newDatabaseName);
        UpdateDatabaseRequest request = new UpdateDatabaseRequest()
                .withCatalogId(catalogId)
                .withName(databaseName)
                .withDatabaseInput(renamed);
        stats.getUpdateDatabase().call(() -> glueClient.updateDatabase(request));
    }
    catch (AmazonServiceException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
Example use of io.trino.spi.connector.SchemaNotFoundException in the trinodb/trino project: class GlueHiveMetastore, method createTable.
/**
 * Creates a table in AWS Glue.
 *
 * Note: Glue has no native privilege model here, so {@code principalPrivileges}
 * is accepted for interface compatibility but not forwarded.
 *
 * @throws TableAlreadyExistsException if a table with the same name exists
 * @throws SchemaNotFoundException if the target database does not exist
 * @throws TrinoException wrapping any other AWS service failure
 */
@Override
public void createTable(Table table, PrincipalPrivileges principalPrivileges)
{
    try {
        CreateTableRequest request = new CreateTableRequest()
                .withCatalogId(catalogId)
                .withDatabaseName(table.getDatabaseName())
                .withTableInput(GlueInputConverter.convertTable(table));
        stats.getCreateTable().call(() -> glueClient.createTable(request));
    }
    catch (AlreadyExistsException e) {
        throw new TableAlreadyExistsException(new SchemaTableName(table.getDatabaseName(), table.getTableName()));
    }
    catch (EntityNotFoundException e) {
        throw new SchemaNotFoundException(table.getDatabaseName());
    }
    catch (AmazonServiceException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
Example use of io.trino.spi.connector.SchemaNotFoundException in the trinodb/trino project: class InMemoryThriftMetastore, method alterDatabase.
/**
 * Replaces or renames a database in the in-memory metastore.
 *
 * When the name is unchanged this is a simple in-place replacement; otherwise
 * the entry is moved under the new name and every keyed structure that embeds
 * the schema name is re-keyed accordingly.
 *
 * @throws SchemaNotFoundException if the source database does not exist
 * @throws SchemaAlreadyExistsException if the target name is already taken
 */
@Override
public synchronized void alterDatabase(HiveIdentity identity, String databaseName, Database newDatabase)
{
    String newDatabaseName = newDatabase.getName();

    // Same name: just swap the stored definition.
    if (databaseName.equals(newDatabaseName)) {
        if (databases.replace(databaseName, newDatabase) == null) {
            throw new SchemaNotFoundException(databaseName);
        }
        return;
    }

    // Rename: register under the new name first, then retire the old entry.
    Database existing = databases.get(databaseName);
    if (existing == null) {
        throw new SchemaNotFoundException(databaseName);
    }
    if (databases.putIfAbsent(newDatabaseName, existing) != null) {
        throw new SchemaAlreadyExistsException(newDatabaseName);
    }
    databases.remove(databaseName);

    // Propagate the rename to all maps keyed by schema-qualified names.
    rewriteKeys(relations, name -> new SchemaTableName(newDatabaseName, name.getTableName()));
    rewriteKeys(views, name -> new SchemaTableName(newDatabaseName, name.getTableName()));
    rewriteKeys(partitions, name -> name.withSchemaName(newDatabaseName));
    rewriteKeys(tablePrivileges, name -> name.withDatabase(newDatabaseName));
}
Example use of io.trino.spi.connector.SchemaNotFoundException in the trinodb/trino project: class BigQueryMetadata, method dropSchema.
/**
 * Drops the BigQuery dataset backing the given schema name.
 *
 * @throws SchemaNotFoundException if no remote dataset matches the schema name
 */
@Override
public void dropSchema(ConnectorSession session, String schemaName)
{
    BigQueryClient client = bigQueryClientFactory.create(session);
    // Resolve the (case-mapped) remote dataset; fail if it cannot be found.
    RemoteDatabaseObject remoteDataset = client.toRemoteDataset(getProjectId(client), schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName));
    client.dropSchema(DatasetId.of(remoteDataset.getOnlyRemoteName()));
}
Aggregations