Example usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb): the listTables method of the CassandraMetadata class.
/**
 * Lists all tables visible to the session, optionally restricted to a single schema.
 * Table names are lower-cased; schemas that disappear mid-listing are skipped.
 */
@Override
public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> schemaName1) {
    ImmutableList.Builder<SchemaTableName> result = ImmutableList.builder();
    for (String schema : listSchemas(session, schemaName1)) {
        try {
            cassandraSession.getCaseSensitiveTableNames(schema)
                    .forEach(table -> result.add(new SchemaTableName(schema, table.toLowerCase(ENGLISH))));
        }
        catch (SchemaNotFoundException ignored) {
            // best-effort listing: the schema may have been dropped concurrently
        }
    }
    return result.build();
}
Example usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb): the getKeyspaceByCaseInsensitiveName method of the CassandraSession class.
/**
 * Resolves a keyspace by case-insensitive name.
 * Throws SchemaNotFoundException when no keyspace matches, and TrinoException
 * (NOT_SUPPORTED) when the name is ambiguous across differently-cased keyspaces.
 */
private KeyspaceMetadata getKeyspaceByCaseInsensitiveName(String caseInsensitiveSchemaName) throws SchemaNotFoundException {
    List<KeyspaceMetadata> keyspaces = executeWithSession(session -> session.getCluster().getMetadata().getKeyspaces());
    // Sort by name so the ambiguity error message is deterministic across runs
    List<KeyspaceMetadata> ordered = Ordering.from(comparing(KeyspaceMetadata::getName)).immutableSortedCopy(keyspaces);
    KeyspaceMetadata match = null;
    for (KeyspaceMetadata candidate : ordered) {
        if (!candidate.getName().equalsIgnoreCase(caseInsensitiveSchemaName)) {
            continue;
        }
        if (match != null) {
            throw new TrinoException(NOT_SUPPORTED, format("More than one keyspace has been found for the case insensitive schema name: %s -> (%s, %s)", caseInsensitiveSchemaName, match.getName(), candidate.getName()));
        }
        match = candidate;
    }
    if (match == null) {
        throw new SchemaNotFoundException(caseInsensitiveSchemaName);
    }
    return match;
}
Example usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb): the createTable method of the DeltaLakeMetadata class.
// Creates a Delta Lake table: writes the initial transaction log (only if none exists at the
// target location) and registers the table in the Hive metastore.
// NOTE(review): the ignoreExisting parameter is not referenced in this body — presumably
// existence conflicts surface from metastore.createTable; confirm against the metastore impl.
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting) {
SchemaTableName schemaTableName = tableMetadata.getTable();
String schemaName = schemaTableName.getSchemaName();
String tableName = schemaTableName.getTableName();
// Fails fast with SchemaNotFoundException if the target schema does not exist
Database schema = metastore.getDatabase(schemaName).orElseThrow(() -> new SchemaNotFoundException(schemaName));
boolean external = true;
String location = getLocation(tableMetadata.getProperties());
if (location == null) {
// No explicit table location: derive one under the schema location and treat the
// table as managed (external = false). Requires the schema to have a location.
Optional<String> schemaLocation = getSchemaLocation(schema);
if (schemaLocation.isEmpty()) {
throw new TrinoException(NOT_SUPPORTED, "The 'location' property must be specified either for the table or the schema");
}
location = new Path(schemaLocation.get(), tableName).toString();
// A managed table must start from an empty path
checkPathContainsNoFiles(session, new Path(location));
external = false;
}
Path targetPath = new Path(location);
ensurePathExists(session, targetPath);
// _delta_log directory under the table root
Path deltaLogDirectory = getTransactionLogDir(targetPath);
Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());
try {
FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsContext(session), targetPath);
if (!fileSystem.exists(deltaLogDirectory)) {
// No existing Delta log at this location: write the initial log entries.
// (If a log already exists, the location is registered as-is without rewriting it.)
validateTableColumns(tableMetadata);
List<String> partitionColumns = getPartitionedBy(tableMetadata.getProperties());
List<DeltaLakeColumnHandle> deltaLakeColumns = tableMetadata.getColumns().stream().map(column -> toColumnHandle(column, partitionColumns)).collect(toImmutableList());
TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriterWithoutTransactionIsolation(session, targetPath.toString());
appendInitialTableEntries(transactionLogWriter, deltaLakeColumns, partitionColumns, buildDeltaMetadataConfiguration(checkpointInterval), CREATE_TABLE_OPERATION, session, nodeVersion, nodeId);
// Register rollback before flushing so a later failure cleans up the new log directory
setRollback(() -> deleteRecursivelyIfExists(new HdfsContext(session), hdfsEnvironment, deltaLogDirectory));
transactionLogWriter.flush();
}
} catch (IOException e) {
throw new TrinoException(DELTA_LAKE_BAD_WRITE, "Unable to access file system for: " + location, e);
}
// Build the metastore entry; DUMMY_DATA_COLUMNS is used because the real schema lives
// in the Delta transaction log, not in the metastore
Table.Builder tableBuilder = Table.builder().setDatabaseName(schemaName).setTableName(tableName).setOwner(Optional.of(session.getUser())).setTableType(external ? EXTERNAL_TABLE.name() : MANAGED_TABLE.name()).setDataColumns(DUMMY_DATA_COLUMNS).setParameters(deltaTableProperties(session, location, external));
setDeltaStorageFormat(tableBuilder, location, targetPath);
Table table = tableBuilder.build();
PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow());
// Commit the table definition to the metastore last, after the log is in place
metastore.createTable(session, table, principalPrivileges);
}
Example usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb): the beginCreateTable method of the DeltaLakeMetadata class.
// Begins a CREATE TABLE AS SELECT: validates the declared columns, resolves the target
// location, prepares an empty target directory, registers a rollback that removes it on
// failure, and returns the output handle used by the write phase.
@Override
public DeltaLakeOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorTableLayout> layout) {
validateTableColumns(tableMetadata);
SchemaTableName schemaTableName = tableMetadata.getTable();
String schemaName = schemaTableName.getSchemaName();
String tableName = schemaTableName.getTableName();
// Fails fast with SchemaNotFoundException if the target schema does not exist
Database schema = metastore.getDatabase(schemaName).orElseThrow(() -> new SchemaNotFoundException(schemaName));
List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
boolean external = true;
String location = getLocation(tableMetadata.getProperties());
if (location == null) {
// No explicit table location: derive one under the schema location and treat the
// table as managed (external = false). Requires the schema to have a location.
Optional<String> schemaLocation = getSchemaLocation(schema);
if (schemaLocation.isEmpty()) {
throw new TrinoException(NOT_SUPPORTED, "The 'location' property must be specified either for the table or the schema");
}
location = new Path(schemaLocation.get(), tableName).toString();
external = false;
}
Path targetPath = new Path(location);
ensurePathExists(session, targetPath);
HdfsContext hdfsContext = new HdfsContext(session);
// Create the directory first, then verify it contains no files before writing into it
createDirectory(hdfsContext, hdfsEnvironment, targetPath);
checkPathContainsNoFiles(session, targetPath);
// On failure, remove the directory created above
setRollback(() -> deleteRecursivelyIfExists(new HdfsContext(session), hdfsEnvironment, targetPath));
return new DeltaLakeOutputTableHandle(schemaName, tableName, tableMetadata.getColumns().stream().map(column -> toColumnHandle(column, partitionedBy)).collect(toImmutableList()), location, DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties()), external);
}
Example usage of io.trino.spi.connector.SchemaNotFoundException in the Trino project (trinodb): the defaultTableLocation method of the TrinoHiveCatalog class.
/**
 * Computes the default storage location for a new table, based on the schema's
 * database entry. Throws SchemaNotFoundException if the schema is missing.
 */
@Override
public String defaultTableLocation(ConnectorSession session, SchemaTableName schemaTableName) {
    String schemaName = schemaTableName.getSchemaName();
    Database database = metastore.getDatabase(schemaName)
            .orElseThrow(() -> new SchemaNotFoundException(schemaName));
    // Generate a fresh on-disk name for the table before resolving its path
    String locationTableName = createNewTableName(schemaTableName.getTableName());
    return getTableDefaultLocation(database, new HdfsEnvironment.HdfsContext(session), hdfsEnvironment, schemaName, locationTableName).toString();
}
Aggregations