Example usage of io.trino.spi.connector.SchemaTablePrefix in the Trino project (trinodb): class TestRaptorMetadata, method testListTableColumns.
@Test
public void testListTableColumns() {
    // Create the orders table, then list columns using an unconstrained prefix
    // (matches every schema and table) and verify the result contains exactly
    // the orders table mapped to its declared column metadata.
    metadata.createTable(SESSION, getOrdersTable(), false);
    SchemaTablePrefix everyTable = new SchemaTablePrefix();
    Map<SchemaTableName, List<ColumnMetadata>> columns = metadata.listTableColumns(SESSION, everyTable);
    assertEquals(columns, ImmutableMap.of(DEFAULT_TEST_ORDERS, getOrdersTable().getColumns()));
}
Example usage of io.trino.spi.connector.SchemaTablePrefix in the Trino project (trinodb): class AbstractTestHive, method testGetAllTableColumns.
@Test
public void testGetAllTableColumns() {
    // List columns across all schemas (empty prefix) within a transaction and
    // check that both well-known fixture tables appear in the result.
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata transactionMetadata = transaction.getMetadata();
        SchemaTablePrefix everyTable = new SchemaTablePrefix();
        Map<SchemaTableName, List<ColumnMetadata>> allColumns = listTableColumns(transactionMetadata, newSession(), everyTable);
        assertTrue(allColumns.containsKey(tablePartitionFormat));
        assertTrue(allColumns.containsKey(tableUnpartitioned));
    }
}
Example usage of io.trino.spi.connector.SchemaTablePrefix in the Trino project (trinodb): class AbstractTestHive, method testHideDeltaLakeTables.
@Test
public void testHideDeltaLakeTables() {
// Registers a table directly in the Hive metastore whose SPARK_TABLE_PROVIDER_KEY
// parameter marks it as Delta Lake, then verifies the Hive connector hides it:
// the table handle cannot be obtained, its hidden system tables resolve to empty,
// and it is absent from table and column listings at every prefix granularity.
ConnectorSession session = newSession();
SchemaTableName tableName = temporaryTable("trino_delta_lake_table");
// Build a managed table with one partition column and one data column; the
// DELTA_LAKE_PROVIDER parameter is what the connector keys off of to hide it.
Table.Builder table = Table.builder().setDatabaseName(tableName.getSchemaName()).setTableName(tableName.getTableName()).setOwner(Optional.of(session.getUser())).setTableType(MANAGED_TABLE.name()).setPartitionColumns(List.of(new Column("a_partition_column", HIVE_INT, Optional.empty()))).setDataColumns(List.of(new Column("a_column", HIVE_STRING, Optional.empty()))).setParameter(SPARK_TABLE_PROVIDER_KEY, DELTA_LAKE_PROVIDER);
// Point the table's storage at the database's default location so the metastore
// entry is well-formed (Parquet is the storage format Delta tables use on disk).
table.getStorageBuilder().setStorageFormat(fromHiveStorageFormat(PARQUET)).setLocation(getTableDefaultLocation(metastoreClient.getDatabase(tableName.getSchemaName()).orElseThrow(), new HdfsContext(session.getIdentity()), hdfsEnvironment, tableName.getSchemaName(), tableName.getTableName()).toString());
// Create via the metastore client directly, bypassing connector-level checks.
metastoreClient.createTable(table.build(), NO_PRIVILEGES);
try {
// Verify the table was created as a Delta Lake table
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
metadata.beginQuery(session);
// Resolving the handle must fail with the dedicated "Cannot query Delta Lake
// table" message rather than returning a usable handle.
assertThatThrownBy(() -> getTableHandle(metadata, tableName)).hasMessage(format("Cannot query Delta Lake table '%s'", tableName));
}
// Verify the hidden `$properties` and `$partitions` Delta Lake table handle can't be obtained within the hive connector
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
metadata.beginQuery(session);
SchemaTableName propertiesTableName = new SchemaTableName(tableName.getSchemaName(), format("%s$properties", tableName.getTableName()));
assertThat(metadata.getSystemTable(newSession(), propertiesTableName)).isEmpty();
SchemaTableName partitionsTableName = new SchemaTableName(tableName.getSchemaName(), format("%s$partitions", tableName.getTableName()));
assertThat(metadata.getSystemTable(newSession(), partitionsTableName)).isEmpty();
}
// Assert that table is hidden
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
// TODO (https://github.com/trinodb/trino/issues/5426) these assertions should use information_schema instead of metadata directly,
// as information_schema or MetadataManager may apply additional logic
// list all tables
assertThat(metadata.listTables(session, Optional.empty())).doesNotContain(tableName);
// list all tables in a schema
assertThat(metadata.listTables(session, Optional.of(tableName.getSchemaName()))).doesNotContain(tableName);
// list all columns
assertThat(listTableColumns(metadata, session, new SchemaTablePrefix()).keySet()).doesNotContain(tableName);
// list all columns in a schema
assertThat(listTableColumns(metadata, session, new SchemaTablePrefix(tableName.getSchemaName())).keySet()).doesNotContain(tableName);
// list all columns in a table
assertThat(listTableColumns(metadata, session, new SchemaTablePrefix(tableName.getSchemaName(), tableName.getTableName())).keySet()).doesNotContain(tableName);
}
} finally {
// Clean up: drop via the metastore client (deleteData=true) even if an
// assertion above failed, so the temporary table never leaks between tests.
metastoreClient.dropTable(tableName.getSchemaName(), tableName.getTableName(), true);
}
}
Example usage of io.trino.spi.connector.SchemaTablePrefix in the Trino project (trinodb): class IcebergMetadata, method listTableColumns.
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix) {
    // Resolve the prefix into concrete table names: a fully qualified prefix
    // denotes exactly one table; otherwise enumerate the (optionally schema-
    // filtered) tables of the catalog.
    List<SchemaTableName> tableNames;
    if (prefix.getTable().isPresent()) {
        tableNames = singletonList(prefix.toSchemaTableName());
    }
    else {
        tableNames = listTables(session, prefix.getSchema());
    }

    // Best-effort collection: tables may vanish or be of a foreign type while
    // we iterate, so individual failures are skipped rather than propagated.
    ImmutableMap.Builder<SchemaTableName, List<ColumnMetadata>> result = ImmutableMap.builder();
    for (SchemaTableName tableName : tableNames) {
        try {
            result.put(tableName, getTableMetadata(session, tableName).getColumns());
        }
        catch (TableNotFoundException ignored) {
            // table disappeared during listing operation
        }
        catch (UnknownTableTypeException ignored) {
            // ignore table of unknown type
        }
        catch (RuntimeException e) {
            // Table can be being removed and this may cause all sorts of exceptions. Log, because we're catching broadly.
            log.warn(e, "Failed to access metadata of table %s during column listing for %s", tableName, prefix);
        }
    }
    return result.buildOrThrow();
}
Example usage of io.trino.spi.connector.SchemaTablePrefix in the Trino project (trinodb): class MetadataManager, method listTableColumns.
@Override
public List<TableColumnsMetadata> listTableColumns(Session session, QualifiedTablePrefix prefix) {
    requireNonNull(prefix, "prefix is null");
    Optional<CatalogMetadata> catalog = getOptionalCatalogMetadata(session, prefix.getCatalogName());
    // Track column metadata for every object name to resolve ties between table and view
    Map<SchemaTableName, Optional<List<ColumnMetadata>>> tableColumns = new HashMap<>();
    if (catalog.isPresent()) {
        CatalogMetadata catalogMetadata = catalog.get();
        SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix();
        // Collect column metadata from the tables of every connector in the catalog
        for (CatalogName catalogName : catalogMetadata.listConnectorIds()) {
            ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName);
            ConnectorSession connectorSession = session.toConnectorSession(catalogName);
            metadata.streamTableColumns(connectorSession, tablePrefix)
                    .forEach(columnsMetadata -> tableColumns.put(columnsMetadata.getTable(), columnsMetadata.getColumns()));
        }
        // Bug fix: the view and materialized-view loops were previously nested inside
        // the per-connector loop above, although getViews()/getMaterializedViews() do
        // not depend on the connector id — the identical entries were recomputed and
        // re-inserted once per connector. Hoisted out so they run once per catalog.
        // Collect column metadata from views. if table and view names overlap, the view wins
        putViewColumns(tableColumns, getViews(session, prefix), "view");
        // if view and materialized view names overlap, the materialized view wins
        putViewColumns(tableColumns, getMaterializedViews(session, prefix), "materialized view");
    }
    return tableColumns.entrySet().stream()
            .map(entry -> new TableColumnsMetadata(entry.getKey(), entry.getValue()))
            .collect(toImmutableList());
}

/**
 * Resolves each view's declared column types and records them in {@code tableColumns},
 * overriding any previously collected entry with the same table name.
 *
 * @param viewKind label used in the INVALID_VIEW error message ("view" or "materialized view")
 * @throws TrinoException with INVALID_VIEW if a declared column type cannot be resolved
 */
private void putViewColumns(Map<SchemaTableName, Optional<List<ColumnMetadata>>> tableColumns, Map<QualifiedObjectName, ViewInfo> views, String viewKind) {
    for (Entry<QualifiedObjectName, ViewInfo> entry : views.entrySet()) {
        ImmutableList.Builder<ColumnMetadata> columns = ImmutableList.builder();
        for (ViewColumn column : entry.getValue().getColumns()) {
            try {
                columns.add(new ColumnMetadata(column.getName(), typeManager.getType(column.getType())));
            }
            catch (TypeNotFoundException e) {
                // Message format matches the original per-kind messages exactly:
                // "... in view: ..." / "... in materialized view: ..."
                throw new TrinoException(INVALID_VIEW, format("Unknown type '%s' for column '%s' in %s: %s", column.getType(), column.getName(), viewKind, entry.getKey()));
            }
        }
        tableColumns.put(entry.getKey().asSchemaTableName(), Optional.of(columns.build()));
    }
}
Aggregations