Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class CassandraSession, method buildColumnHandle.
private Optional<CassandraColumnHandle> buildColumnHandle(AbstractTableMetadata tableMetadata, ColumnMetadata columnMeta, boolean partitionKey, boolean clusteringKey, int ordinalPosition, boolean hidden)
{
    Optional<CassandraType> cassandraType = toCassandraType(columnMeta.getType());
    if (cassandraType.isEmpty()) {
        log.debug("Unsupported column type: %s", columnMeta.getType().getName());
        return Optional.empty();
    }
    // For parameterized types (e.g. collections), every type argument must also be supported
    List<DataType> typeArgs = columnMeta.getType().getTypeArguments();
    for (DataType typeArgument : typeArgs) {
        if (!isFullySupported(typeArgument)) {
            log.debug("%s column has unsupported type: %s", columnMeta.getName(), typeArgument);
            return Optional.empty();
        }
    }
    // Secondary indexes exist only on regular tables, not on materialized views
    boolean indexed = false;
    SchemaTableName schemaTableName = new SchemaTableName(tableMetadata.getKeyspace().getName(), tableMetadata.getName());
    if (!isMaterializedView(schemaTableName)) {
        TableMetadata table = (TableMetadata) tableMetadata;
        for (IndexMetadata idx : table.getIndexes()) {
            if (idx.getTarget().equals(columnMeta.getName())) {
                indexed = true;
                break;
            }
        }
    }
    return Optional.of(new CassandraColumnHandle(columnMeta.getName(), ordinalPosition, cassandraType.get(), partitionKey, clusteringKey, indexed, hidden));
}
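SchemaTableName is a plain value object pairing a schema name (here, the Cassandra keyspace) with a table name, and the isMaterializedView(schemaTableName) call above relies on its value-based equality. A minimal, hypothetical sketch of such a lookup (not the actual CassandraSession implementation) could look like this:

import io.trino.spi.connector.SchemaTableName;
import java.util.Set;

final class MaterializedViewLookup
{
    private final Set<SchemaTableName> materializedViews;

    MaterializedViewLookup(Set<SchemaTableName> materializedViews)
    {
        this.materializedViews = Set.copyOf(materializedViews);
    }

    // SchemaTableName defines equals/hashCode over (schemaName, tableName),
    // so it works directly as a set or map key
    boolean isMaterializedView(SchemaTableName name)
    {
        return materializedViews.contains(name);
    }
}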
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class CassandraTestingUtils, method createTestTables.
public static void createTestTables(CassandraSession cassandraSession, String keyspace, Date date)
{
    createKeyspace(cassandraSession, keyspace);
    createTableAllTypes(cassandraSession, new SchemaTableName(keyspace, TABLE_ALL_TYPES), date, 9);
    createTableTupleType(cassandraSession, new SchemaTableName(keyspace, TABLE_TUPLE_TYPE));
    createTableUserDefinedType(cassandraSession, new SchemaTableName(keyspace, TABLE_USER_DEFINED_TYPE));
    createTableDeleteData(cassandraSession, new SchemaTableName(keyspace, TABLE_DELETE_DATA));
}
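Each helper receives a SchemaTableName so it knows both the keyspace and the table to create. The bodies of those helpers are not shown here; as a rough, hypothetical sketch (the execute(String) call, the helper names, and the column layout are assumptions, not the real CassandraTestingUtils code), such a helper typically builds a qualified name from the two parts:

// Hypothetical illustration only; not the actual CassandraTestingUtils implementation
private static String qualifiedName(SchemaTableName table)
{
    // getSchemaName() carries the keyspace, getTableName() the table
    return table.getSchemaName() + "." + table.getTableName();
}

private static void createSimpleTable(CassandraSession session, SchemaTableName table)
{
    // assumes the session exposes an execute(String cql) helper
    session.execute("DROP TABLE IF EXISTS " + qualifiedName(table));
    session.execute("CREATE TABLE " + qualifiedName(table) + " (id bigint PRIMARY KEY, data text)");
}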
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestJsonCassandraHandles, method testTableHandleDeserialize.
@Test
public void testTableHandleDeserialize()
        throws Exception
{
    String json = OBJECT_MAPPER.writeValueAsString(TABLE_HANDLE_AS_MAP);
    CassandraTableHandle tableHandle = OBJECT_MAPPER.readValue(json, CassandraTableHandle.class);
    assertEquals(tableHandle.getSchemaName(), "cassandra_schema");
    assertEquals(tableHandle.getTableName(), "cassandra_table");
    assertEquals(tableHandle.getSchemaTableName(), new SchemaTableName("cassandra_schema", "cassandra_table"));
    assertEquals(tableHandle.getClusteringKeyPredicates(), "");
}
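The round trip works because the handle stores schema and table as plain strings for JSON serialization and derives the SchemaTableName on demand. As an illustrative sketch of that common pattern (not the verbatim CassandraTableHandle source):

// Illustrative pattern sketch; field and annotation details of the real handle may differ
public SchemaTableName getSchemaTableName()
{
    return new SchemaTableName(schemaName, tableName);
}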
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestTableSnapshot, method testOnlyReadsTrailingJsonFiles.
@Test
public void testOnlyReadsTrailingJsonFiles()
        throws IOException
{
    Map<String, Integer> expectedFileAccess = new HashMap<>();
    TableSnapshot tableSnapshot = TableSnapshot.load(new SchemaTableName("schema", "person"), accessTrackingFileSystem, tableLocation, parquetReaderOptions, true);
    expectedFileAccess.put("_last_checkpoint", 1);
    expectedFileAccess.put("00000000000000000011.json", 1);
    expectedFileAccess.put("00000000000000000012.json", 1);
    expectedFileAccess.put("00000000000000000013.json", 1);
    expectedFileAccess.put("00000000000000000014.json", 1);
    assertEquals(accessTrackingFileSystem.getOpenCount(), expectedFileAccess);
    // Iterating the JSON transaction log entries should not open any additional files
    tableSnapshot.getJsonTransactionLogEntries().forEach(entry -> {
    });
    assertEquals(accessTrackingFileSystem.getOpenCount(), expectedFileAccess);
}
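The accessTrackingFileSystem used above is a test wrapper that counts how often each file is opened; its source is not included here. A small, hypothetical stand-in for just the counting part (names and structure are assumptions, not the real test class) might be:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of an open-count tracker, in the spirit of accessTrackingFileSystem
final class FileOpenCounter
{
    private final Map<String, Integer> openCount = new HashMap<>();

    void recordOpen(String fileName)
    {
        openCount.merge(fileName, 1, Integer::sum);
    }

    Map<String, Integer> getOpenCount()
    {
        return Map.copyOf(openCount);
    }
}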
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestDeltaLakeGlueMetastore, method testHideNonDeltaLakeTable.
@Test
public void testHideNonDeltaLakeTable()
        throws Exception
{
    SchemaTableName deltaLakeTable = new SchemaTableName(databaseName, "delta_lake_table_" + randomName());
    SchemaTableName nonDeltaLakeTable1 = new SchemaTableName(databaseName, "hive_table_" + randomName());
    SchemaTableName nonDeltaLakeTable2 = new SchemaTableName(databaseName, "hive_table_" + randomName());
    String deltaLakeTableLocation = tableLocation(deltaLakeTable);
    createTable(deltaLakeTable, deltaLakeTableLocation, tableBuilder -> {
        tableBuilder.setParameter(TABLE_PROVIDER_PROPERTY, TABLE_PROVIDER_VALUE);
        tableBuilder.setParameter(LOCATION_PROPERTY, deltaLakeTableLocation);
        tableBuilder.getStorageBuilder()
                .setStorageFormat(DELTA_STORAGE_FORMAT)
                .setSerdeParameters(ImmutableMap.of(DeltaLakeMetadata.PATH_PROPERTY, deltaLakeTableLocation))
                .setLocation(deltaLakeTableLocation);
    });
    createTransactionLog(deltaLakeTableLocation);
    createTable(nonDeltaLakeTable1, tableLocation(nonDeltaLakeTable1), tableBuilder -> {
    });
    createTable(nonDeltaLakeTable2, tableLocation(nonDeltaLakeTable2), tableBuilder -> tableBuilder.setParameter(TABLE_PROVIDER_PROPERTY, "foo"));
    DeltaLakeMetadata metadata = metadataFactory.create(SESSION.getIdentity());
    // Verify the tables were created as non Delta Lake tables
    assertThatThrownBy(() -> metadata.getTableHandle(session, nonDeltaLakeTable1))
            .isInstanceOf(TrinoException.class)
            .hasMessage(format("%s is not a Delta Lake table", nonDeltaLakeTable1));
    assertThatThrownBy(() -> metadata.getTableHandle(session, nonDeltaLakeTable2))
            .isInstanceOf(TrinoException.class)
            .hasMessage(format("%s is not a Delta Lake table", nonDeltaLakeTable2));
    // TODO (https://github.com/trinodb/trino/issues/5426)
    // these assertions should use information_schema instead of metadata directly,
    // as information_schema or MetadataManager may apply additional logic

    // list all tables
    assertThat(metadata.listTables(session, Optional.empty()))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);
    // list all tables in a schema
    assertThat(metadata.listTables(session, Optional.of(databaseName)))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);
    // list all columns in a schema
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName)))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);
    // list all columns in a table
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName, deltaLakeTable.getTableName())))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName, nonDeltaLakeTable1.getTableName()))).isEmpty();
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName, nonDeltaLakeTable2.getTableName()))).isEmpty();
}
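The listTableColumns assertions work because a SchemaTablePrefix can name just a schema or a schema plus a table, and only matching SchemaTableName values are returned. A self-contained sketch of that matching rule, using a hypothetical record rather than the SPI class (whose accessor names vary between versions):

import io.trino.spi.connector.SchemaTableName;
import java.util.Optional;

// Hypothetical stand-in for SchemaTablePrefix, shown only to illustrate prefix matching
record TablePrefix(Optional<String> schema, Optional<String> table)
{
    boolean matches(SchemaTableName name)
    {
        // An absent component matches everything; a present one must match exactly
        return schema.map(s -> s.equals(name.getSchemaName())).orElse(true)
                && table.map(t -> t.equals(name.getTableName())).orElse(true);
    }
}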