Use of io.trino.spi.connector.CatalogSchemaTableName in project trino by trinodb.
The class ColumnJdbcTable, method applyFilter.
@Override
public TupleDomain<ColumnHandle> applyFilter(ConnectorSession connectorSession, Constraint constraint)
{
    TupleDomain<ColumnHandle> tupleDomain = constraint.getSummary();
    if (tupleDomain.isNone() || constraint.predicate().isEmpty()) {
        return tupleDomain;
    }
    Predicate<Map<ColumnHandle, NullableValue>> predicate = constraint.predicate().get();
    Set<ColumnHandle> predicateColumns = constraint.getPredicateColumns()
            .orElseThrow(() -> new VerifyException("columns not present for a predicate"));

    boolean hasSchemaPredicate = predicateColumns.contains(TABLE_SCHEMA_COLUMN);
    boolean hasTablePredicate = predicateColumns.contains(TABLE_NAME_COLUMN);
    if (!hasSchemaPredicate && !hasTablePredicate) {
        // No filter on schema name and table name at all.
        return tupleDomain;
    }

    Session session = ((FullConnectorSession) connectorSession).getSession();

    Optional<String> catalogFilter = tryGetSingleVarcharValue(tupleDomain, TABLE_CATALOG_COLUMN);
    Optional<String> schemaFilter = tryGetSingleVarcharValue(tupleDomain, TABLE_SCHEMA_COLUMN);
    Optional<String> tableFilter = tryGetSingleVarcharValue(tupleDomain, TABLE_NAME_COLUMN);

    if (schemaFilter.isPresent() && tableFilter.isPresent()) {
        // No need to narrow down the domain.
        return tupleDomain;
    }

    List<String> catalogs = listCatalogs(session, metadata, accessControl, catalogFilter).keySet().stream()
            .filter(catalogName -> predicate.test(ImmutableMap.of(TABLE_CATALOG_COLUMN, toNullableValue(catalogName))))
            .collect(toImmutableList());

    List<CatalogSchemaName> schemas = catalogs.stream()
            .flatMap(catalogName -> listSchemas(session, metadata, accessControl, catalogName, schemaFilter).stream()
                    .filter(schemaName -> !hasSchemaPredicate || predicate.test(ImmutableMap.of(
                            TABLE_CATALOG_COLUMN, toNullableValue(catalogName),
                            TABLE_SCHEMA_COLUMN, toNullableValue(schemaName))))
                    .map(schemaName -> new CatalogSchemaName(catalogName, schemaName)))
            .collect(toImmutableList());

    if (!hasTablePredicate) {
        return TupleDomain.withColumnDomains(ImmutableMap.<ColumnHandle, Domain>builder()
                .put(TABLE_CATALOG_COLUMN, schemas.stream()
                        .map(CatalogSchemaName::getCatalogName)
                        .collect(toVarcharDomain())
                        .simplify(MAX_DOMAIN_SIZE))
                .put(TABLE_SCHEMA_COLUMN, schemas.stream()
                        .map(CatalogSchemaName::getSchemaName)
                        .collect(toVarcharDomain())
                        .simplify(MAX_DOMAIN_SIZE))
                .buildOrThrow());
    }

    List<CatalogSchemaTableName> tables = schemas.stream()
            .flatMap(schema -> {
                QualifiedTablePrefix tablePrefix = tableFilter.isPresent()
                        ? new QualifiedTablePrefix(schema.getCatalogName(), schema.getSchemaName(), tableFilter.get())
                        : new QualifiedTablePrefix(schema.getCatalogName(), schema.getSchemaName());
                return listTables(session, metadata, accessControl, tablePrefix).stream()
                        .filter(schemaTableName -> predicate.test(ImmutableMap.of(
                                TABLE_CATALOG_COLUMN, toNullableValue(schema.getCatalogName()),
                                TABLE_SCHEMA_COLUMN, toNullableValue(schemaTableName.getSchemaName()),
                                TABLE_NAME_COLUMN, toNullableValue(schemaTableName.getTableName()))))
                        .map(schemaTableName -> new CatalogSchemaTableName(schema.getCatalogName(), schemaTableName.getSchemaName(), schemaTableName.getTableName()));
            })
            .collect(toImmutableList());

    return TupleDomain.withColumnDomains(ImmutableMap.<ColumnHandle, Domain>builder()
            .put(TABLE_CATALOG_COLUMN, tables.stream()
                    .map(CatalogSchemaTableName::getCatalogName)
                    .collect(toVarcharDomain())
                    .simplify(MAX_DOMAIN_SIZE))
            .put(TABLE_SCHEMA_COLUMN, tables.stream()
                    .map(catalogSchemaTableName -> catalogSchemaTableName.getSchemaTableName().getSchemaName())
                    .collect(toVarcharDomain())
                    .simplify(MAX_DOMAIN_SIZE))
            .put(TABLE_NAME_COLUMN, tables.stream()
                    .map(catalogSchemaTableName -> catalogSchemaTableName.getSchemaTableName().getTableName())
                    .collect(toVarcharDomain())
                    .simplify(MAX_DOMAIN_SIZE))
            .buildOrThrow());
}
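The narrowed domain returned above is built by collecting the surviving catalog, schema, and table names into discrete varchar domains via the toVarcharDomain() collector, which is referenced but not shown in this snippet. The helper below is a minimal, hypothetical sketch of that idea, assuming unbounded VARCHAR as the column type; it is not the actual collector from ColumnJdbcTable.

import io.airlift.slice.Slices;
import io.trino.spi.predicate.Domain;

import java.util.List;
import java.util.stream.Collectors;

import static io.trino.spi.type.VarcharType.VARCHAR;

final class VarcharDomains
{
    private VarcharDomains() {}

    // Hypothetical equivalent of what a toVarcharDomain()-style helper could do
    // for an already-materialized list of names.
    static Domain toVarcharDomain(List<String> values)
    {
        if (values.isEmpty()) {
            // No surviving values: the column can match nothing.
            return Domain.none(VARCHAR);
        }
        // Build a discrete domain over the remaining string values.
        return Domain.multipleValues(
                VARCHAR,
                values.stream().map(Slices::utf8Slice).collect(Collectors.toList()));
    }
}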
Use of io.trino.spi.connector.CatalogSchemaTableName in project trino by trinodb.
The class MetadataManager, method dropTable.
@Override
public void dropTable(Session session, TableHandle tableHandle)
{
    CatalogName catalogName = tableHandle.getCatalogName();
    CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadata(session);
    Optional<CatalogSchemaTableName> tableName = getTableNameIfSystemSecurity(session, catalogMetadata, tableHandle);
    metadata.dropTable(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle());
    tableName.ifPresent(name -> systemSecurityMetadata.tableDropped(session, name));
}
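The CatalogSchemaTableName passed to systemSecurityMetadata.tableDropped pairs the catalog name with the connector-level SchemaTableName. A minimal, self-contained sketch of building and reading such a fully qualified name follows; the names used are illustrative, not taken from MetadataManager.

import io.trino.spi.connector.CatalogSchemaTableName;
import io.trino.spi.connector.SchemaTableName;

public class QualifiedNameExample
{
    public static void main(String[] args)
    {
        // Qualify a connector-level table name with its catalog.
        CatalogSchemaTableName dropped =
                new CatalogSchemaTableName("example_catalog", new SchemaTableName("example_schema", "example_table"));

        System.out.println(dropped.getCatalogName());                     // example_catalog
        System.out.println(dropped.getSchemaTableName().getSchemaName()); // example_schema
        System.out.println(dropped.getSchemaTableName().getTableName());  // example_table
    }
}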
Use of io.trino.spi.connector.CatalogSchemaTableName in project trino by trinodb.
The class HiveMetadata, method redirectTable.
@Override
public Optional<CatalogSchemaTableName> redirectTable(ConnectorSession session, SchemaTableName tableName)
{
    requireNonNull(session, "session is null");
    requireNonNull(tableName, "tableName is null");
    if (isHiveSystemSchema(tableName.getSchemaName())) {
        return Optional.empty();
    }
    // we need to chop off any "$partitions" and similar suffixes from table name while querying the metastore for the Table object
    TableNameSplitResult tableNameSplit = splitTableName(tableName.getTableName());
    Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableNameSplit.getBaseTableName());
    if (table.isEmpty() || VIRTUAL_VIEW.name().equals(table.get().getTableType())) {
        return Optional.empty();
    }
    Optional<CatalogSchemaTableName> catalogSchemaTableName = tableRedirectionsProvider.redirectTable(session, table.get());
    // stitch back the suffix we cut off
    return catalogSchemaTableName.map(name -> new CatalogSchemaTableName(
            name.getCatalogName(),
            new SchemaTableName(
                    name.getSchemaTableName().getSchemaName(),
                    name.getSchemaTableName().getTableName() + tableNameSplit.getSuffix().orElse(""))));
}
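The suffix handling can be pictured as splitting a name such as "orders$partitions" into a base name and an optional suffix, redirecting on the base name, and re-attaching the suffix to the redirected target. The sketch below is a hypothetical stand-in for splitTableName and TableNameSplitResult, which are not shown above; it simply splits at the first '$' and should not be read as the real Hive implementation.

import java.util.Optional;

final class TableNameSuffixExample
{
    // Hypothetical equivalent of TableNameSplitResult: a base name plus an optional "$..." suffix.
    record Split(String baseTableName, Optional<String> suffix) {}

    static Split splitTableName(String tableName)
    {
        int index = tableName.indexOf('$');
        if (index < 0) {
            return new Split(tableName, Optional.empty());
        }
        // Keep the "$" as part of the suffix so it can be re-attached verbatim.
        return new Split(tableName.substring(0, index), Optional.of(tableName.substring(index)));
    }

    public static void main(String[] args)
    {
        Split split = splitTableName("orders$partitions");
        // Redirect on "orders", then stitch "$partitions" back onto the redirected name.
        String redirectedTable = "orders_v2" + split.suffix().orElse("");
        System.out.println(split.baseTableName() + " -> " + redirectedTable); // orders -> orders_v2$partitions
    }
}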
Use of io.trino.spi.connector.CatalogSchemaTableName in project trino by trinodb.
The class AbstractTestHive, method setup.
protected final void setup(String databaseName, HiveConfig hiveConfig, HiveMetastore hiveMetastore, HdfsEnvironment hdfsConfiguration)
{
    setupHive(databaseName);

    metastoreClient = hiveMetastore;
    hdfsEnvironment = hdfsConfiguration;
    HivePartitionManager partitionManager = new HivePartitionManager(hiveConfig);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment,
            partitionManager,
            10, 10, 10, false, false, false, true, true, false, 1000,
            Optional.empty(),
            true,
            TESTING_TYPE_MANAGER,
            NOOP_METADATA_PROVIDER,
            locationService,
            partitionUpdateCodec,
            newFixedThreadPool(2),
            heartbeatService,
            TEST_SERVER_VERSION,
            (session, tableHandle) -> {
                if (!tableHandle.getTableName().contains("apply_redirection_tester")) {
                    return Optional.empty();
                }
                return Optional.of(new TableScanRedirectApplicationResult(
                        new CatalogSchemaTableName("hive", databaseName, "mock_redirection_target"),
                        ImmutableMap.of(),
                        TupleDomain.all()));
            },
            ImmutableSet.of(
                    new PartitionsSystemTableProvider(partitionManager, TESTING_TYPE_MANAGER),
                    new PropertiesSystemTableProvider()),
            metastore -> new NoneHiveMaterializedViewMetadata()
            {
                @Override
                public Optional<ConnectorMaterializedViewDefinition> getMaterializedView(ConnectorSession session, SchemaTableName viewName)
                {
                    if (!viewName.getTableName().contains("materialized_view_tester")) {
                        return Optional.empty();
                    }
                    return Optional.of(new ConnectorMaterializedViewDefinition(
                            "dummy_view_sql",
                            Optional.empty(),
                            Optional.empty(),
                            Optional.empty(),
                            ImmutableList.of(new ConnectorMaterializedViewDefinition.Column("abc", TypeId.of("type"))),
                            Optional.empty(),
                            Optional.of("alice"),
                            ImmutableMap.of()));
                }
            },
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager,
            partitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(hiveConfig),
            directExecutor(),
            new CounterStat(),
            100,
            hiveConfig.getMaxOutstandingSplitsSize(),
            hiveConfig.getMinPartitionBatchSize(),
            hiveConfig.getMaxPartitionBatchSize(),
            hiveConfig.getMaxInitialSplits(),
            hiveConfig.getSplitLoaderConcurrency(),
            hiveConfig.getMaxSplitsPerSecond(),
            false,
            TESTING_TYPE_MANAGER);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(hiveConfig, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(JOIN_COMPILER, BLOCK_TYPE_OPERATORS),
            TESTING_TYPE_MANAGER,
            getHiveConfig(),
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(hiveConfig),
            new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            hiveConfig,
            getDefaultHivePageSourceFactories(hdfsEnvironment, hiveConfig),
            getDefaultHiveRecordCursorProviders(hiveConfig, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, hiveConfig),
            Optional.empty());
    nodePartitioningProvider = new HiveNodePartitioningProvider(
            new TestingNodeManager("fake-environment"),
            TESTING_TYPE_MANAGER);
}
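The table-scan redirection lambda wired into HiveMetadataFactory above reduces to a simple rule: only tables whose name contains "apply_redirection_tester" are redirected, and always to the same fixed target in the test database. Extracted as a standalone sketch for readability (illustrative only, not a production redirection provider):

import io.trino.spi.connector.CatalogSchemaTableName;

import java.util.Optional;

final class MockRedirectionRule
{
    private MockRedirectionRule() {}

    static Optional<CatalogSchemaTableName> redirectionTarget(String databaseName, String tableName)
    {
        if (!tableName.contains("apply_redirection_tester")) {
            // Every other table is left untouched.
            return Optional.empty();
        }
        // Redirect to a fixed table in the same database, mirroring the test setup above.
        return Optional.of(new CatalogSchemaTableName("hive", databaseName, "mock_redirection_target"));
    }
}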
Use of io.trino.spi.connector.CatalogSchemaTableName in project trino by trinodb.
The class AbstractTestHive, method testApplyRedirection.
@Test
public void testApplyRedirection()
        throws Exception
{
    SchemaTableName sourceTableName = temporaryTable("apply_redirection_tester");
    doCreateEmptyTable(sourceTableName, ORC, CREATE_TABLE_COLUMNS);
    SchemaTableName tableName = temporaryTable("apply_no_redirection_tester");
    doCreateEmptyTable(tableName, ORC, CREATE_TABLE_COLUMNS);
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        assertThat(metadata.applyTableScanRedirect(session, getTableHandle(metadata, tableName))).isEmpty();
        Optional<TableScanRedirectApplicationResult> result = metadata.applyTableScanRedirect(session, getTableHandle(metadata, sourceTableName));
        assertThat(result).isPresent();
        assertThat(result.get().getDestinationTable()).isEqualTo(new CatalogSchemaTableName("hive", database, "mock_redirection_target"));
    }
    finally {
        dropTable(sourceTableName);
        dropTable(tableName);
    }
}
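The isEqualTo assertion above relies on CatalogSchemaTableName comparing by value. A minimal check of that behavior, using illustrative names rather than the test's database:

import io.trino.spi.connector.CatalogSchemaTableName;

public class CatalogSchemaTableNameEquality
{
    public static void main(String[] args)
    {
        CatalogSchemaTableName expected = new CatalogSchemaTableName("hive", "tpch", "mock_redirection_target");
        CatalogSchemaTableName actual = new CatalogSchemaTableName("hive", "tpch", "mock_redirection_target");

        // Same catalog, schema, and table name -> equal and interchangeable as map keys.
        System.out.println(expected.equals(actual));                  // true
        System.out.println(expected.hashCode() == actual.hashCode()); // true
    }
}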