Use of io.trino.spi.connector.SchemaNotFoundException in project trino by trinodb.
The class DeltaLakeMetadata, method dropSchema.
@Override
public void dropSchema(ConnectorSession session, String schemaName)
{
    Optional<Path> location = metastore.getDatabase(schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName))
            .getLocation()
            .map(Path::new);

    // If we see files in the schema location, don't delete it.
    // If we see no files or can't see the location at all, use the configured fallback.
    boolean deleteData = location.map(path -> {
        HdfsContext context = new HdfsContext(session);
        try (FileSystem fs = hdfsEnvironment.getFileSystem(context, path)) {
            return !fs.listLocatedStatus(path).hasNext();
        }
        catch (IOException | RuntimeException e) {
            // The location could not be inspected; fall back to the configured behavior
            LOG.warn(e, "Could not check schema directory '%s'", path);
            return deleteSchemaLocationsFallback;
        }
    }).orElse(deleteSchemaLocationsFallback);

    metastore.dropDatabase(schemaName, deleteData);
}
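For context, the snippet below is a minimal, self-contained sketch (not Trino source) of the lookup-or-throw pattern used above: a missing database translates directly into a SchemaNotFoundException. The Metastore interface, the requireLocation helper, and the example schema names are hypothetical.

    import io.trino.spi.connector.SchemaNotFoundException;

    import java.util.Map;
    import java.util.Optional;

    final class SchemaLookupSketch
    {
        // Hypothetical stand-in for the metastore dependency used above
        interface Metastore
        {
            Optional<String> getDatabaseLocation(String schemaName);
        }

        static String requireLocation(Metastore metastore, String schemaName)
        {
            // Absent database -> SchemaNotFoundException, mirroring the orElseThrow above
            return metastore.getDatabaseLocation(schemaName)
                    .orElseThrow(() -> new SchemaNotFoundException(schemaName));
        }

        public static void main(String[] args)
        {
            Metastore metastore = name -> Optional.ofNullable(
                    Map.of("sales", "s3://bucket/sales").get(name));
            System.out.println(requireLocation(metastore, "sales")); // prints the location
            // requireLocation(metastore, "missing") would throw SchemaNotFoundException
        }
    }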
Use of io.trino.spi.connector.SchemaNotFoundException in project trino by trinodb.
The class DeltaLakeMetadata, method getSchemaProperties.
@Override
public Map<String, Object> getSchemaProperties(ConnectorSession session, CatalogSchemaName schemaName)
{
    String schema = schemaName.getSchemaName();
    checkState(!schema.equals("information_schema") && !schema.equals("sys"), "Schema is not accessible: %s", schemaName);
    Optional<Database> db = metastore.getDatabase(schema);
    return db.map(DeltaLakeSchemaProperties::fromDatabase)
            .orElseThrow(() -> new SchemaNotFoundException(schema));
}
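The checkState guard above comes from Guava's Preconditions. A minimal sketch of how it behaves for a blocked schema name (the schema value here is just an example, not taken from the connector):

    import static com.google.common.base.Preconditions.checkState;

    final class CheckStateSketch
    {
        public static void main(String[] args)
        {
            String schema = "information_schema";
            // Throws IllegalStateException("Schema is not accessible: information_schema")
            checkState(!schema.equals("information_schema") && !schema.equals("sys"),
                    "Schema is not accessible: %s", schema);
        }
    }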
Use of io.trino.spi.connector.SchemaNotFoundException in project trino by trinodb.
The class PhoenixClient, method beginCreateTable.
@Override
public JdbcOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schema = schemaTableName.getSchemaName();
    String table = schemaTableName.getTableName();
    if (!getSchemaNames(session).contains(schema)) {
        throw new SchemaNotFoundException(schema);
    }
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        schema = getIdentifierMapping().toRemoteSchemaName(identity, connection, schema);
        table = getIdentifierMapping().toRemoteTableName(identity, connection, schema, table);
        schema = toPhoenixSchemaName(schema);
        LinkedList<ColumnMetadata> tableColumns = new LinkedList<>(tableMetadata.getColumns());
        Map<String, Object> tableProperties = tableMetadata.getProperties();

        Optional<Boolean> immutableRows = PhoenixTableProperties.getImmutableRows(tableProperties);
        String immutable = immutableRows.isPresent() && immutableRows.get() ? "IMMUTABLE" : "";

        ImmutableList.Builder<String> columnNames = ImmutableList.builder();
        ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
        ImmutableList.Builder<String> columnList = ImmutableList.builder();
        Set<ColumnMetadata> rowkeyColumns = tableColumns.stream()
                .filter(col -> isPrimaryKey(col, tableProperties))
                .collect(toSet());
        ImmutableList.Builder<String> pkNames = ImmutableList.builder();
        Optional<String> rowkeyColumn = Optional.empty();
        if (rowkeyColumns.isEmpty()) {
            // Add a rowkey when not specified in DDL
            columnList.add(ROWKEY + " bigint not null");
            pkNames.add(ROWKEY);
            execute(session, format("CREATE SEQUENCE %s", getEscapedTableName(schema, table + "_sequence")));
            rowkeyColumn = Optional.of(ROWKEY);
        }
        for (ColumnMetadata column : tableColumns) {
            String columnName = getIdentifierMapping().toRemoteColumnName(connection, column.getName());
            columnNames.add(columnName);
            columnTypes.add(column.getType());
            String typeStatement = toWriteMapping(session, column.getType()).getDataType();
            if (rowkeyColumns.contains(column)) {
                typeStatement += " not null";
                pkNames.add(columnName);
            }
            columnList.add(format("%s %s", getEscapedArgument(columnName), typeStatement));
        }

        // Translate Trino table properties into Phoenix/HBase table options
        ImmutableList.Builder<String> tableOptions = ImmutableList.builder();
        PhoenixTableProperties.getSaltBuckets(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.SALT_BUCKETS + "=" + value));
        PhoenixTableProperties.getSplitOn(tableProperties).ifPresent(value -> tableOptions.add("SPLIT ON (" + value.replace('"', '\'') + ")"));
        PhoenixTableProperties.getDisableWal(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DISABLE_WAL + "=" + value));
        PhoenixTableProperties.getDefaultColumnFamily(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DEFAULT_COLUMN_FAMILY + "=" + value));
        PhoenixTableProperties.getBloomfilter(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.BLOOMFILTER + "='" + value + "'"));
        PhoenixTableProperties.getVersions(tableProperties).ifPresent(value -> tableOptions.add(HConstants.VERSIONS + "=" + value));
        PhoenixTableProperties.getMinVersions(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.MIN_VERSIONS + "=" + value));
        PhoenixTableProperties.getCompression(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.COMPRESSION + "='" + value + "'"));
        PhoenixTableProperties.getTimeToLive(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.TTL + "=" + value));
        PhoenixTableProperties.getDataBlockEncoding(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + value + "'"));

        String sql = format(
                "CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s",
                immutable,
                getEscapedTableName(schema, table),
                join(", ", columnList.build()),
                join(", ", pkNames.build()),
                join(", ", tableOptions.build()));
        execute(session, sql);
        return new PhoenixOutputTableHandle(schema, table, columnNames.build(), columnTypes.build(), Optional.empty(), rowkeyColumn);
    }
    catch (SQLException e) {
        if (e.getErrorCode() == SQLExceptionCode.TABLE_ALREADY_EXIST.getErrorCode()) {
            throw new TrinoException(ALREADY_EXISTS, "Phoenix table already exists", e);
        }
        throw new TrinoException(PHOENIX_METADATA_ERROR, "Error creating Phoenix table", e);
    }
}
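To see how the pieces compose, the sketch below rebuilds the final CREATE TABLE string with hypothetical column definitions and table options. It is illustrative only, using plain strings in place of the connector's builders and property helpers:

    import static java.lang.String.format;
    import static java.lang.String.join;

    import java.util.List;

    final class PhoenixCreateTableSketch
    {
        public static void main(String[] args)
        {
            // Hypothetical values standing in for immutable, columnList, pkNames, and tableOptions
            String immutable = "IMMUTABLE";
            String escapedTableName = "\"my_schema\".\"my_table\"";
            List<String> columnList = List.of("\"id\" bigint not null", "\"name\" varchar");
            List<String> pkNames = List.of("id");
            List<String> tableOptions = List.of("SALT_BUCKETS=4", "DISABLE_WAL=true");

            // Same format string as the method above
            String sql = format("CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s",
                    immutable, escapedTableName, join(", ", columnList),
                    join(", ", pkNames), join(", ", tableOptions));
            System.out.println(sql);
            // CREATE IMMUTABLE TABLE "my_schema"."my_table" ("id" bigint not null, "name" varchar ,
            //     CONSTRAINT PK PRIMARY KEY (id)) SALT_BUCKETS=4, DISABLE_WAL=true
        }
    }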
Use of io.trino.spi.connector.SchemaNotFoundException in project trino by trinodb.
The class BridgingHiveMetastore, method setDatabaseOwner.
@Override
public void setDatabaseOwner(String databaseName, HivePrincipal principal)
{
    Database database = fromMetastoreApiDatabase(delegate.getDatabase(databaseName)
            .orElseThrow(() -> new SchemaNotFoundException(databaseName)));
    Database newDatabase = Database.builder(database)
            .setOwnerName(Optional.of(principal.getName()))
            .setOwnerType(Optional.of(principal.getType()))
            .build();
    delegate.alterDatabase(identity, databaseName, toMetastoreApiDatabase(newDatabase));
}
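The method follows a read-modify-write pattern: load the existing database, copy it with only the owner replaced, and write it back. A minimal sketch of that pattern using a hypothetical immutable SchemaInfo record in place of Trino's Database class:

    import java.util.Optional;

    final class OwnerUpdateSketch
    {
        // Hypothetical immutable metadata holder
        record SchemaInfo(String name, Optional<String> ownerName)
        {
            SchemaInfo withOwner(String owner)
            {
                return new SchemaInfo(name, Optional.of(owner));
            }
        }

        public static void main(String[] args)
        {
            SchemaInfo current = new SchemaInfo("sales", Optional.empty());
            // Copy the existing metadata and only replace the owner, as setDatabaseOwner does
            SchemaInfo updated = current.withOwner("alice");
            System.out.println(updated);
        }
    }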
Use of io.trino.spi.connector.SchemaNotFoundException in project trino by trinodb.
The class BridgingHiveMetastore, method renameDatabase.
@Override
public void renameDatabase(String databaseName, String newDatabaseName)
{
    org.apache.hadoop.hive.metastore.api.Database database = delegate.getDatabase(databaseName)
            .orElseThrow(() -> new SchemaNotFoundException(databaseName));
    database.setName(newDatabaseName);
    delegate.alterDatabase(identity, databaseName, database);
    // Some metastores silently ignore the rename; if the database is still registered
    // under its old name, report that renaming schemas is not supported
    delegate.getDatabase(databaseName).ifPresent(newDatabase -> {
        if (newDatabase.getName().equals(databaseName)) {
            throw new TrinoException(NOT_SUPPORTED, "Hive metastore does not support renaming schemas");
        }
    });
}
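The re-read at the end is the interesting part: rather than assume the alter succeeded, the method checks whether the old name is still registered and fails with NOT_SUPPORTED instead of reporting a rename that never happened. A sketch of that check against a hypothetical FakeMetastore that ignores renames:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    final class RenameCheckSketch
    {
        // Hypothetical metastore that silently ignores database renames
        static final class FakeMetastore
        {
            private final Map<String, String> databases = new HashMap<>(Map.of("old_schema", "old_schema"));

            Optional<String> getDatabase(String name)
            {
                return Optional.ofNullable(databases.get(name));
            }

            void alterDatabase(String name, String newName)
            {
                // Intentionally a no-op: the old entry stays in place
            }
        }

        public static void main(String[] args)
        {
            FakeMetastore metastore = new FakeMetastore();
            metastore.alterDatabase("old_schema", "new_schema");
            // Post-check: the old name is still present, so the rename was not applied
            metastore.getDatabase("old_schema").ifPresent(name -> {
                if (name.equals("old_schema")) {
                    System.out.println("Rename ignored by metastore; would throw NOT_SUPPORTED");
                }
            });
        }
    }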