Usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb):
the dropNamespace method of the TrinoHiveCatalog class.
/**
 * Drops the schema identified by {@code namespace}.
 * <p>
 * A schema that still contains tables or views is rejected with {@code SCHEMA_NOT_EMPTY}
 * before any metastore mutation happens. Whether the schema's storage location is deleted
 * depends on whether the directory is observed to be empty; if the directory cannot be
 * inspected, {@code deleteSchemaLocationsFallback} decides.
 *
 * @param session the current connector session (provides the HDFS identity)
 * @param namespace name of the schema to drop
 * @throws TrinoException with SCHEMA_NOT_EMPTY when tables or views remain
 * @throws SchemaNotFoundException when the schema does not exist in the metastore
 */
@Override
public void dropNamespace(ConnectorSession session, String namespace) {
    // Basic sanity check up front so the caller gets a clear error message
    boolean hasTables = !listTables(session, Optional.of(namespace)).isEmpty();
    boolean hasViews = !listViews(session, Optional.of(namespace)).isEmpty();
    if (hasTables || hasViews) {
        throw new TrinoException(SCHEMA_NOT_EMPTY, "Schema not empty: " + namespace);
    }
    Optional<Path> schemaLocation = metastore.getDatabase(namespace)
            .orElseThrow(() -> new SchemaNotFoundException(namespace))
            .getLocation()
            .map(Path::new);
    // Files present in the schema location -> keep the directory.
    // No files -> request deletion.
    // Inspection failed -> fall back to the configured behavior.
    boolean deleteData = deleteSchemaLocationsFallback;
    if (schemaLocation.isPresent()) {
        Path directory = schemaLocation.get();
        HdfsContext hdfsContext = new HdfsContext(session);
        try (FileSystem fileSystem = hdfsEnvironment.getFileSystem(hdfsContext, directory)) {
            deleteData = !fileSystem.listLocatedStatus(directory).hasNext();
        } catch (IOException e) {
            log.warn(e, "Could not check schema directory '%s'", directory);
            deleteData = deleteSchemaLocationsFallback;
        }
    }
    metastore.dropDatabase(namespace, deleteData);
}
Usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb):
the loadNamespaceMetadata method of the TrinoGlueCatalog class.
/**
 * Loads the metadata properties of a Glue database (schema).
 * <p>
 * The result contains the database's location URI (under {@code LOCATION_PROPERTY}),
 * when one is set, plus every Glue database parameter verbatim.
 *
 * @param session the current connector session (unused here beyond the SPI contract)
 * @param namespace name of the schema to look up
 * @return the schema's properties as an immutable map
 * @throws SchemaNotFoundException when Glue reports the database does not exist
 * @throws TrinoException with ICEBERG_CATALOG_ERROR on any other Glue service failure
 */
@Override
public Map<String, Object> loadNamespaceMetadata(ConnectorSession session, String namespace) {
    try {
        GetDatabaseRequest request = new GetDatabaseRequest().withName(namespace);
        // Route the call through the stats wrapper so latency/failures are recorded
        Database glueDatabase = stats.getGetDatabase().call(() -> glueClient.getDatabase(request).getDatabase());
        ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();
        String locationUri = glueDatabase.getLocationUri();
        if (locationUri != null) {
            properties.put(LOCATION_PROPERTY, locationUri);
        }
        if (glueDatabase.getParameters() != null) {
            properties.putAll(glueDatabase.getParameters());
        }
        return properties.buildOrThrow();
    } catch (EntityNotFoundException e) {
        throw new SchemaNotFoundException(namespace);
    } catch (AmazonServiceException e) {
        throw new TrinoException(ICEBERG_CATALOG_ERROR, e);
    }
}
Usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb):
the beginCreateTable method of the PhoenixClient class.
/**
 * Begins creation of a Phoenix table for CREATE TABLE AS / CREATE TABLE.
 * <p>
 * Maps Trino column metadata to a Phoenix DDL statement. When no primary-key
 * columns are declared in the table properties, a synthetic {@code ROWKEY}
 * bigint column backed by a Phoenix SEQUENCE is added. All recognized Phoenix
 * table properties (salt buckets, split points, WAL, column family, HBase
 * column-descriptor options) are appended as table options.
 *
 * @param session the current connector session
 * @param tableMetadata Trino-side description of the table to create
 * @return a handle describing the created table for the subsequent write
 * @throws SchemaNotFoundException when the target schema does not exist
 * @throws TrinoException with ALREADY_EXISTS when Phoenix reports the table exists,
 *         or PHOENIX_METADATA_ERROR on any other SQL failure
 */
@Override
public JdbcOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schema = schemaTableName.getSchemaName();
    String table = schemaTableName.getTableName();
    if (!getSchemaNames(session).contains(schema)) {
        throw new SchemaNotFoundException(schema);
    }
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        // Resolve case-sensitivity to the remote names before building DDL
        schema = getIdentifierMapping().toRemoteSchemaName(identity, connection, schema);
        table = getIdentifierMapping().toRemoteTableName(identity, connection, schema, table);
        schema = toPhoenixSchemaName(schema);
        // The column list is only iterated, never mutated: an immutable snapshot
        // is the right container (LinkedList was both mutable and slow to index)
        List<ColumnMetadata> tableColumns = ImmutableList.copyOf(tableMetadata.getColumns());
        Map<String, Object> tableProperties = tableMetadata.getProperties();
        String immutable = PhoenixTableProperties.getImmutableRows(tableProperties).orElse(false) ? "IMMUTABLE" : "";
        ImmutableList.Builder<String> columnNames = ImmutableList.builder();
        ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
        ImmutableList.Builder<String> columnList = ImmutableList.builder();
        Set<ColumnMetadata> rowkeyColumns = tableColumns.stream().filter(col -> isPrimaryKey(col, tableProperties)).collect(toSet());
        ImmutableList.Builder<String> pkNames = ImmutableList.builder();
        Optional<String> rowkeyColumn = Optional.empty();
        if (rowkeyColumns.isEmpty()) {
            // No primary key declared in DDL: synthesize a ROWKEY column fed by a sequence
            columnList.add(ROWKEY + " bigint not null");
            pkNames.add(ROWKEY);
            execute(session, format("CREATE SEQUENCE %s", getEscapedTableName(schema, table + "_sequence")));
            rowkeyColumn = Optional.of(ROWKEY);
        }
        for (ColumnMetadata column : tableColumns) {
            String columnName = getIdentifierMapping().toRemoteColumnName(connection, column.getName());
            columnNames.add(columnName);
            columnTypes.add(column.getType());
            String typeStatement = toWriteMapping(session, column.getType()).getDataType();
            if (rowkeyColumns.contains(column)) {
                // Primary-key columns must be NOT NULL in Phoenix
                typeStatement += " not null";
                pkNames.add(columnName);
            }
            columnList.add(format("%s %s", getEscapedArgument(columnName), typeStatement));
        }
        ImmutableList.Builder<String> tableOptions = ImmutableList.builder();
        PhoenixTableProperties.getSaltBuckets(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.SALT_BUCKETS + "=" + value));
        PhoenixTableProperties.getSplitOn(tableProperties).ifPresent(value -> tableOptions.add("SPLIT ON (" + value.replace('"', '\'') + ")"));
        PhoenixTableProperties.getDisableWal(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DISABLE_WAL + "=" + value));
        PhoenixTableProperties.getDefaultColumnFamily(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DEFAULT_COLUMN_FAMILY + "=" + value));
        PhoenixTableProperties.getBloomfilter(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.BLOOMFILTER + "='" + value + "'"));
        PhoenixTableProperties.getVersions(tableProperties).ifPresent(value -> tableOptions.add(HConstants.VERSIONS + "=" + value));
        PhoenixTableProperties.getMinVersions(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.MIN_VERSIONS + "=" + value));
        PhoenixTableProperties.getCompression(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.COMPRESSION + "='" + value + "'"));
        PhoenixTableProperties.getTimeToLive(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.TTL + "=" + value));
        PhoenixTableProperties.getDataBlockEncoding(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + value + "'"));
        String sql = format("CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s", immutable, getEscapedTableName(schema, table), join(", ", columnList.build()), join(", ", pkNames.build()), join(", ", tableOptions.build()));
        execute(session, sql);
        return new PhoenixOutputTableHandle(schema, table, columnNames.build(), columnTypes.build(), Optional.empty(), rowkeyColumn);
    } catch (SQLException e) {
        if (e.getErrorCode() == SQLExceptionCode.TABLE_ALREADY_EXIST.getErrorCode()) {
            throw new TrinoException(ALREADY_EXISTS, "Phoenix table already exists", e);
        }
        throw new TrinoException(PHOENIX_METADATA_ERROR, "Error creating Phoenix table", e);
    }
}
Usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb):
the createTable method of the KuduClientSession class.
/**
 * Creates a Kudu table from Trino table metadata.
 * <p>
 * With {@code ignoreExisting} set, an already-existing table is not an error and
 * {@code null} is returned instead. The target schema must exist according to the
 * configured schema emulation.
 *
 * @param tableMetadata Trino-side description of the table to create
 * @param ignoreExisting when true, return {@code null} if the table already exists
 * @return the created Kudu table, or {@code null} when skipped due to {@code ignoreExisting}
 * @throws SchemaNotFoundException when the target schema does not exist
 * @throws TrinoException with GENERIC_INTERNAL_ERROR wrapping any Kudu client failure
 */
public KuduTable createTable(ConnectorTableMetadata tableMetadata, boolean ignoreExisting) {
    try {
        String rawName = schemaEmulation.toRawName(tableMetadata.getTable());
        // CREATE TABLE IF NOT EXISTS semantics: silently skip an existing table
        if (ignoreExisting && client.tableExists(rawName)) {
            return null;
        }
        String schemaName = tableMetadata.getTable().getSchemaName();
        if (!schemaEmulation.existsSchema(client, schemaName)) {
            throw new SchemaNotFoundException(schemaName);
        }
        Schema schema = buildSchema(tableMetadata.getColumns());
        CreateTableOptions options = buildCreateTableOptions(schema, tableMetadata.getProperties());
        return client.createTable(rawName, schema, options);
    } catch (KuduException e) {
        throw new TrinoException(GENERIC_INTERNAL_ERROR, e);
    }
}
Aggregations