Example 11 with NullableValue

Use of io.trino.spi.predicate.NullableValue in the trinodb/trino project.

From the class TestInformationSchemaMetadata, method testConstraint().

/**
 * @see #testConstraintColumns()
 */
private static boolean testConstraint(Map<ColumnHandle, NullableValue> bindings) {
    // test_schema has a table named "another_table" and we filter that out in this predicate
    // Note: the columns inspected here must be in sync with testConstraintColumns()
    NullableValue catalog = bindings.get(new InformationSchemaColumnHandle("table_catalog"));
    NullableValue schema = bindings.get(new InformationSchemaColumnHandle("table_schema"));
    NullableValue table = bindings.get(new InformationSchemaColumnHandle("table_name"));
    boolean isValid = true;
    if (catalog != null) {
        isValid = ((Slice) catalog.getValue()).toStringUtf8().equals("test_catalog");
    }
    if (schema != null) {
        isValid &= ((Slice) schema.getValue()).toStringUtf8().equals("test_schema");
    }
    if (table != null) {
        isValid &= ((Slice) table.getValue()).toStringUtf8().equals("test_view");
    }
    return isValid;
}
Also used : Slice(io.airlift.slice.Slice) InformationSchemaColumnHandle(io.trino.connector.informationschema.InformationSchemaColumnHandle) NullableValue(io.trino.spi.predicate.NullableValue)
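
For orientation, here is a hand-rolled driver for the predicate above. It is not part of the Trino test suite; it only illustrates how the bindings map is keyed by column handle and how string values are carried as VARCHAR slices:

import static io.airlift.slice.Slices.utf8Slice;
import static io.trino.spi.type.VarcharType.VARCHAR;

import com.google.common.collect.ImmutableMap;
import io.trino.connector.informationschema.InformationSchemaColumnHandle;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.predicate.NullableValue;
import java.util.Map;

static void demo() {
    // table_schema is left unbound, so the predicate skips that check entirely
    Map<ColumnHandle, NullableValue> bindings = ImmutableMap.of(
            new InformationSchemaColumnHandle("table_catalog"), NullableValue.of(VARCHAR, utf8Slice("test_catalog")),
            new InformationSchemaColumnHandle("table_name"), NullableValue.of(VARCHAR, utf8Slice("another_table")));
    boolean matches = testConstraint(bindings); // false: only "test_view" passes the table_name check
}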

Example 12 with NullableValue

Use of io.trino.spi.predicate.NullableValue in the trinodb/trino project.

From the class TestPartitionedOutputOperator, method testOutputForSimplePageWithPartitionConstantAndHashBlock().

@Test
public void testOutputForSimplePageWithPartitionConstantAndHashBlock() {
    PartitionedOutputOperator partitionedOutputOperator = partitionedOutputOperator(BIGINT)
            .withPartitionConstants(ImmutableList.of(Optional.empty(), Optional.of(new NullableValue(BIGINT, 1L))))
            // use first block and constant block at index 1 as input to partitionFunction
            .withPartitionChannels(0, -1)
            // use both channels to calculate partition (a+b) mod 2
            .withHashChannels(0, 1)
            .build();
    Page page = new Page(createLongsBlock(0L, 1L, 2L, 3L));
    processPages(partitionedOutputOperator, page);
    List<Object> partition0 = readLongs(outputBuffer.getEnqueuedDeserialized(0), 0);
    assertThat(partition0).containsExactly(1L, 3L);
    List<Object> partition1 = readLongs(outputBuffer.getEnqueuedDeserialized(1), 0);
    assertThat(partition1).containsExactly(0L, 2L);
}
Also used : NullableValue(io.trino.spi.predicate.NullableValue) Page(io.trino.spi.Page) Test(org.testng.annotations.Test)
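
The routing asserted above can be checked with plain arithmetic: per the comments in the test, the partition is (a + b) mod 2, where a comes from channel 0 and b is the NullableValue constant 1L bound via channel -1. A dependency-free sketch:

// row values come from createLongsBlock(0L, 1L, 2L, 3L); the constant is 1L
long constant = 1L;
for (long value : new long[] {0L, 1L, 2L, 3L}) {
    long partition = (value + constant) % 2;
    System.out.printf("row value %d -> partition %d%n", value, partition);
}
// values 1 and 3 land in partition 0, values 0 and 2 in partition 1,
// matching the containsExactly assertions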

Example 13 with NullableValue

Use of io.trino.spi.predicate.NullableValue in the trinodb/trino project.

From the class TableMetadataSystemTable, method buildPages().

private static List<Page> buildPages(MetadataDao dao, ConnectorTableMetadata tableMetadata, TupleDomain<Integer> tupleDomain) {
    Map<Integer, NullableValue> domainValues = extractFixedValues(tupleDomain).orElse(ImmutableMap.of());
    String schemaName = getStringValue(domainValues.get(getColumnIndex(tableMetadata, SCHEMA_NAME)));
    String tableName = getStringValue(domainValues.get(getColumnIndex(tableMetadata, TABLE_NAME)));
    PageListBuilder pageBuilder = new PageListBuilder(tableMetadata.getColumns().stream().map(ColumnMetadata::getType).collect(toList()));
    List<TableMetadataRow> tableRows = dao.getTableMetadataRows(schemaName, tableName);
    PeekingIterator<ColumnMetadataRow> columnRowIterator = peekingIterator(dao.getColumnMetadataRows(schemaName, tableName).iterator());
    for (TableMetadataRow tableRow : tableRows) {
        while (columnRowIterator.hasNext() && columnRowIterator.peek().getTableId() < tableRow.getTableId()) {
            columnRowIterator.next();
        }
        String temporalColumnName = null;
        SortedMap<Integer, String> sortColumnNames = new TreeMap<>();
        SortedMap<Integer, String> bucketColumnNames = new TreeMap<>();
        OptionalLong temporalColumnId = tableRow.getTemporalColumnId();
        while (columnRowIterator.hasNext() && columnRowIterator.peek().getTableId() == tableRow.getTableId()) {
            ColumnMetadataRow columnRow = columnRowIterator.next();
            if (temporalColumnId.isPresent() && columnRow.getColumnId() == temporalColumnId.getAsLong()) {
                temporalColumnName = columnRow.getColumnName();
            }
            OptionalInt sortOrdinalPosition = columnRow.getSortOrdinalPosition();
            if (sortOrdinalPosition.isPresent()) {
                sortColumnNames.put(sortOrdinalPosition.getAsInt(), columnRow.getColumnName());
            }
            OptionalInt bucketOrdinalPosition = columnRow.getBucketOrdinalPosition();
            if (bucketOrdinalPosition.isPresent()) {
                bucketColumnNames.put(bucketOrdinalPosition.getAsInt(), columnRow.getColumnName());
            }
        }
        pageBuilder.beginRow();
        // schema_name, table_name
        VARCHAR.writeSlice(pageBuilder.nextBlockBuilder(), utf8Slice(tableRow.getSchemaName()));
        VARCHAR.writeSlice(pageBuilder.nextBlockBuilder(), utf8Slice(tableRow.getTableName()));
        // temporal_column
        if (temporalColumnId.isPresent()) {
            if (temporalColumnName == null) {
                throw new TrinoException(RAPTOR_CORRUPT_METADATA, format("Table ID %s has corrupt metadata (invalid temporal column ID)", tableRow.getTableId()));
            }
            VARCHAR.writeSlice(pageBuilder.nextBlockBuilder(), utf8Slice(temporalColumnName));
        } else {
            pageBuilder.nextBlockBuilder().appendNull();
        }
        // ordering_columns
        writeArray(pageBuilder.nextBlockBuilder(), sortColumnNames.values());
        // distribution_name
        Optional<String> distributionName = tableRow.getDistributionName();
        if (distributionName.isPresent()) {
            VARCHAR.writeSlice(pageBuilder.nextBlockBuilder(), utf8Slice(distributionName.get()));
        } else {
            pageBuilder.nextBlockBuilder().appendNull();
        }
        // bucket_count
        OptionalInt bucketCount = tableRow.getBucketCount();
        if (bucketCount.isPresent()) {
            BIGINT.writeLong(pageBuilder.nextBlockBuilder(), bucketCount.getAsInt());
        } else {
            pageBuilder.nextBlockBuilder().appendNull();
        }
        // bucketing_columns
        writeArray(pageBuilder.nextBlockBuilder(), bucketColumnNames.values());
        // organized
        BOOLEAN.writeBoolean(pageBuilder.nextBlockBuilder(), tableRow.isOrganized());
    }
    return pageBuilder.build();
}
Also used : ColumnMetadata(io.trino.spi.connector.ColumnMetadata) NullableValue(io.trino.spi.predicate.NullableValue) ColumnMetadataRow(io.trino.plugin.raptor.legacy.metadata.ColumnMetadataRow) OptionalInt(java.util.OptionalInt) TreeMap(java.util.TreeMap) OptionalLong(java.util.OptionalLong) TrinoException(io.trino.spi.TrinoException) TableMetadataRow(io.trino.plugin.raptor.legacy.metadata.TableMetadataRow)
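
The column loop above is a sorted-merge over two result sets keyed by table ID: skip orphaned column rows, then consume the run of rows belonging to the current table. A self-contained sketch of the same pattern, with hypothetical Parent and Child records standing in for TableMetadataRow and ColumnMetadataRow (both inputs assumed sorted by ID):

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import java.util.Iterator;
import java.util.List;

record Parent(long id) {}
record Child(long parentId, String name) {}

static void merge(List<Parent> parents, Iterator<Child> children) {
    PeekingIterator<Child> it = Iterators.peekingIterator(children);
    for (Parent parent : parents) {
        // skip children whose parent was filtered out of the parent list
        while (it.hasNext() && it.peek().parentId() < parent.id()) {
            it.next();
        }
        // consume the contiguous run of children belonging to this parent
        while (it.hasNext() && it.peek().parentId() == parent.id()) {
            System.out.println(parent.id() + " -> " + it.next().name());
        }
    }
}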

Example 14 with NullableValue

Use of io.trino.spi.predicate.NullableValue in the trinodb/trino project.

From the class IcebergMetadata, method getTableProperties().

@Override
public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle tableHandle) {
    IcebergTableHandle table = (IcebergTableHandle) tableHandle;
    if (table.getSnapshotId().isEmpty()) {
        // a missing snapshot id means the table has no data to scan, so we can return
        // TupleDomain.none() as the predicate
        return new ConnectorTableProperties(TupleDomain.none(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableList.of());
    }
    Table icebergTable = catalog.loadTable(session, table.getSchemaTableName());
    // Extract identity partition fields that are present in all partition specs, for creating the discrete predicates.
    Set<Integer> partitionSourceIds = identityPartitionColumnsInAllSpecs(icebergTable);
    TupleDomain<IcebergColumnHandle> enforcedPredicate = table.getEnforcedPredicate();
    DiscretePredicates discretePredicates = null;
    if (!partitionSourceIds.isEmpty()) {
        // Extract identity partition columns
        Map<Integer, IcebergColumnHandle> columns = getColumns(icebergTable.schema(), typeManager).stream()
                .filter(column -> partitionSourceIds.contains(column.getId()))
                .collect(toImmutableMap(IcebergColumnHandle::getId, Function.identity()));
        Supplier<List<FileScanTask>> lazyFiles = Suppliers.memoize(() -> {
            TableScan tableScan = icebergTable.newScan()
                    .useSnapshot(table.getSnapshotId().get())
                    .filter(toIcebergExpression(enforcedPredicate))
                    .includeColumnStats();
            try (CloseableIterable<FileScanTask> iterator = tableScan.planFiles()) {
                return ImmutableList.copyOf(iterator);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        });
        Iterable<FileScanTask> files = () -> lazyFiles.get().iterator();
        Iterable<TupleDomain<ColumnHandle>> discreteTupleDomain = Iterables.transform(files, fileScan -> {
            // Extract partition values in the data file
            Map<Integer, Optional<String>> partitionColumnValueStrings = getPartitionKeys(fileScan);
            Map<ColumnHandle, NullableValue> partitionValues = partitionSourceIds.stream()
                    .filter(partitionColumnValueStrings::containsKey)
                    .collect(toImmutableMap(columns::get, columnId -> {
                        IcebergColumnHandle column = columns.get(columnId);
                        Object prestoValue = deserializePartitionValue(column.getType(), partitionColumnValueStrings.get(columnId).orElse(null), column.getName());
                        return NullableValue.of(column.getType(), prestoValue);
                    }));
            return TupleDomain.fromFixedValues(partitionValues);
        });
        discretePredicates = new DiscretePredicates(columns.values().stream().map(ColumnHandle.class::cast).collect(toImmutableList()), discreteTupleDomain);
    }
    return new ConnectorTableProperties(
            // ... over all tableScan.planFiles() and caching partition values in table handle
            enforcedPredicate.transformKeys(ColumnHandle.class::cast),
            // TODO: implement table partitioning
            Optional.empty(),
            Optional.empty(),
            Optional.ofNullable(discretePredicates),
            ImmutableList.of());
}
Also used : IcebergUtil.getPartitionKeys(io.trino.plugin.iceberg.IcebergUtil.getPartitionKeys) TrinoCatalog(io.trino.plugin.iceberg.catalog.TrinoCatalog) FileSystem(org.apache.hadoop.fs.FileSystem) ConnectorTableExecuteHandle(io.trino.spi.connector.ConnectorTableExecuteHandle) HiveApplyProjectionUtil.replaceWithNewVariables(io.trino.plugin.hive.HiveApplyProjectionUtil.replaceWithNewVariables) Collections.singletonList(java.util.Collections.singletonList) NOT_SUPPORTED(io.trino.spi.StandardErrorCode.NOT_SUPPORTED) TableNotFoundException(io.trino.spi.connector.TableNotFoundException) Matcher(java.util.regex.Matcher) ConnectorOutputTableHandle(io.trino.spi.connector.ConnectorOutputTableHandle) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) Map(java.util.Map) RewriteFiles(org.apache.iceberg.RewriteFiles) ProjectionApplicationResult(io.trino.spi.connector.ProjectionApplicationResult) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) CloseableIterable(org.apache.iceberg.io.CloseableIterable) IcebergUtil.newCreateTableTransaction(io.trino.plugin.iceberg.IcebergUtil.newCreateTableTransaction) Domain(io.trino.spi.predicate.Domain) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) IcebergTableExecuteHandle(io.trino.plugin.iceberg.procedure.IcebergTableExecuteHandle) Set(java.util.Set) Schema(org.apache.iceberg.Schema) ColumnIdentity.primitiveColumnIdentity(io.trino.plugin.iceberg.ColumnIdentity.primitiveColumnIdentity) SchemaTableName(io.trino.spi.connector.SchemaTableName) PartitionSpecParser(org.apache.iceberg.PartitionSpecParser) Collectors.joining(java.util.stream.Collectors.joining) Type(org.apache.iceberg.types.Type) UncheckedIOException(java.io.UncheckedIOException) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) TrinoPrincipal(io.trino.spi.security.TrinoPrincipal) CatalogSchemaTableName(io.trino.spi.connector.CatalogSchemaTableName) SchemaTablePrefix(io.trino.spi.connector.SchemaTablePrefix) RemoteIterator(org.apache.hadoop.fs.RemoteIterator) Iterables(com.google.common.collect.Iterables) ConnectorTableLayout(io.trino.spi.connector.ConnectorTableLayout) ConnectorInsertTableHandle(io.trino.spi.connector.ConnectorInsertTableHandle) IcebergUtil.deserializePartitionValue(io.trino.plugin.iceberg.IcebergUtil.deserializePartitionValue) Slice(io.airlift.slice.Slice) NullableValue(io.trino.spi.predicate.NullableValue) ColumnMetadata(io.trino.spi.connector.ColumnMetadata) ConnectorTableMetadata(io.trino.spi.connector.ConnectorTableMetadata) Variable(io.trino.spi.expression.Variable) Supplier(java.util.function.Supplier) OptionalLong(java.util.OptionalLong) MaterializedViewFreshness(io.trino.spi.connector.MaterializedViewFreshness) ColumnHandle(io.trino.spi.connector.ColumnHandle) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) FILE_FORMAT_PROPERTY(io.trino.plugin.iceberg.IcebergTableProperties.FILE_FORMAT_PROPERTY) OPTIMIZE(io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.OPTIMIZE) ConstraintApplicationResult(io.trino.spi.connector.ConstraintApplicationResult) DEPENDS_ON_TABLES(io.trino.plugin.iceberg.catalog.hms.TrinoHiveCatalog.DEPENDS_ON_TABLES) Table(org.apache.iceberg.Table) IOException(java.io.IOException) ConnectorSession(io.trino.spi.connector.ConnectorSession) ICEBERG_INVALID_METADATA(io.trino.plugin.iceberg.IcebergErrorCode.ICEBERG_INVALID_METADATA) ConnectorTableProperties(io.trino.spi.connector.ConnectorTableProperties) 
DiscretePredicates(io.trino.spi.connector.DiscretePredicates) ConnectorExpression(io.trino.spi.expression.ConnectorExpression) PartitionFields.parsePartitionFields(io.trino.plugin.iceberg.PartitionFields.parsePartitionFields) ArrayDeque(java.util.ArrayDeque) MaterializedViewNotFoundException(io.trino.spi.connector.MaterializedViewNotFoundException) TypeConverter.toTrinoType(io.trino.plugin.iceberg.TypeConverter.toTrinoType) IcebergUtil.getColumns(io.trino.plugin.iceberg.IcebergUtil.getColumns) IcebergSessionProperties.isProjectionPushdownEnabled(io.trino.plugin.iceberg.IcebergSessionProperties.isProjectionPushdownEnabled) IcebergTableProcedureId(io.trino.plugin.iceberg.procedure.IcebergTableProcedureId) IcebergSessionProperties.isStatisticsEnabled(io.trino.plugin.iceberg.IcebergSessionProperties.isStatisticsEnabled) AppendFiles(org.apache.iceberg.AppendFiles) NO_RETRIES(io.trino.spi.connector.RetryMode.NO_RETRIES) ConnectorMaterializedViewDefinition(io.trino.spi.connector.ConnectorMaterializedViewDefinition) TypeConverter.toIcebergType(io.trino.plugin.iceberg.TypeConverter.toIcebergType) PartitionField(org.apache.iceberg.PartitionField) DataFiles(org.apache.iceberg.DataFiles) CatalogSchemaName(io.trino.spi.connector.CatalogSchemaName) LOCATION_PROPERTY(io.trino.plugin.iceberg.IcebergTableProperties.LOCATION_PROPERTY) Path(org.apache.hadoop.fs.Path) DATA(io.trino.plugin.iceberg.TableType.DATA) ConnectorViewDefinition(io.trino.spi.connector.ConnectorViewDefinition) FileScanTask(org.apache.iceberg.FileScanTask) DataFile(org.apache.iceberg.DataFile) Splitter(com.google.common.base.Splitter) IcebergTableProperties.getPartitioning(io.trino.plugin.iceberg.IcebergTableProperties.getPartitioning) IcebergUtil.getFileFormat(io.trino.plugin.iceberg.IcebergUtil.getFileFormat) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) HiveWrittenPartitions(io.trino.plugin.hive.HiveWrittenPartitions) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ComputedStatistics(io.trino.spi.statistics.ComputedStatistics) TrinoException(io.trino.spi.TrinoException) TableScan(org.apache.iceberg.TableScan) ConnectorOutputMetadata(io.trino.spi.connector.ConnectorOutputMetadata) String.format(java.lang.String.format) SchemaParser(org.apache.iceberg.SchemaParser) DataSize(io.airlift.units.DataSize) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) List(java.util.List) BIGINT(io.trino.spi.type.BigintType.BIGINT) ClassLoaderSafeSystemTable(io.trino.plugin.base.classloader.ClassLoaderSafeSystemTable) IcebergUtil.getTableComment(io.trino.plugin.iceberg.IcebergUtil.getTableComment) Assignment(io.trino.spi.connector.Assignment) HiveApplyProjectionUtil(io.trino.plugin.hive.HiveApplyProjectionUtil) BeginTableExecuteResult(io.trino.spi.connector.BeginTableExecuteResult) PartitionSpec(org.apache.iceberg.PartitionSpec) Function.identity(java.util.function.Function.identity) TableProperties(org.apache.iceberg.TableProperties) Optional(java.util.Optional) ProjectedColumnRepresentation(io.trino.plugin.hive.HiveApplyProjectionUtil.ProjectedColumnRepresentation) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) Pattern(java.util.regex.Pattern) SystemTable(io.trino.spi.connector.SystemTable) JsonCodec(io.airlift.json.JsonCodec) Constraint(io.trino.spi.connector.Constraint) IcebergOptimizeHandle(io.trino.plugin.iceberg.procedure.IcebergOptimizeHandle) 
Logger(io.airlift.log.Logger) PartitionFields.toPartitionFields(io.trino.plugin.iceberg.PartitionFields.toPartitionFields) HashMap(java.util.HashMap) Deque(java.util.Deque) Function(java.util.function.Function) ExpressionConverter.toIcebergExpression(io.trino.plugin.iceberg.ExpressionConverter.toIcebergExpression) PARTITIONING_PROPERTY(io.trino.plugin.iceberg.IcebergTableProperties.PARTITIONING_PROPERTY) HashSet(java.util.HashSet) BiPredicate(java.util.function.BiPredicate) ImmutableList(com.google.common.collect.ImmutableList) Verify.verify(com.google.common.base.Verify.verify) Objects.requireNonNull(java.util.Objects.requireNonNull) Suppliers(com.google.common.base.Suppliers) TableStatistics(io.trino.spi.statistics.TableStatistics) HiveApplyProjectionUtil.extractSupportedProjectedColumns(io.trino.plugin.hive.HiveApplyProjectionUtil.extractSupportedProjectedColumns) VerifyException(com.google.common.base.VerifyException) RetryMode(io.trino.spi.connector.RetryMode) Iterator(java.util.Iterator) TupleDomain(io.trino.spi.predicate.TupleDomain) IcebergUtil.toIcebergSchema(io.trino.plugin.iceberg.IcebergUtil.toIcebergSchema) Transaction(org.apache.iceberg.Transaction) Comparator(java.util.Comparator) TypeManager(io.trino.spi.type.TypeManager) HiveUtil.isStructuralType(io.trino.plugin.hive.util.HiveUtil.isStructuralType) Snapshot(org.apache.iceberg.Snapshot)
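
The per-file transform above reduces to turning a map of partition values into a point predicate. A minimal sketch, where dateColumn, regionColumn, and epochDay are hypothetical stand-ins for the resolved identity partition columns and a value already deserialized by deserializePartitionValue:

import static io.trino.spi.type.DateType.DATE;
import static io.trino.spi.type.VarcharType.VARCHAR;

import com.google.common.collect.ImmutableMap;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.TupleDomain;
import java.util.Map;

static TupleDomain<ColumnHandle> filePredicate(ColumnHandle dateColumn, ColumnHandle regionColumn, long epochDay) {
    Map<ColumnHandle, NullableValue> partitionValues = ImmutableMap.of(
            dateColumn, NullableValue.of(DATE, epochDay),   // concrete partition value
            regionColumn, NullableValue.asNull(VARCHAR));   // this file sits in the NULL partition
    return TupleDomain.fromFixedValues(partitionValues);
}

Each file contributes one such TupleDomain, and DiscretePredicates exposes the collection lazily; that is why the file listing itself is wrapped in Suppliers.memoize rather than materialized eagerly.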

Example 15 with NullableValue

Use of io.trino.spi.predicate.NullableValue in the trinodb/trino project.

From the class AbstractTestHive, method testBucketedTableDoubleFloat().

@Test
public void testBucketedTableDoubleFloat() throws Exception {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableBucketedDoubleFloat);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);
        assertTableIsBucketed(tableHandle, transaction, session);
        ImmutableMap<ColumnHandle, NullableValue> bindings = ImmutableMap.<ColumnHandle, NullableValue>builder()
                .put(columnHandles.get(columnIndex.get("t_float")), NullableValue.of(REAL, (long) floatToRawIntBits(87.1f)))
                .put(columnHandles.get(columnIndex.get("t_double")), NullableValue.of(DOUBLE, 88.2))
                .buildOrThrow();
        // floats and doubles are not supported, so we should see all splits
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.fromFixedValues(bindings), OptionalInt.of(32), Optional.empty());
        assertEquals(result.getRowCount(), 100);
    }
}
Also used : HiveColumnHandle.bucketColumnHandle(io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle) ColumnHandle(io.trino.spi.connector.ColumnHandle) NullableValue(io.trino.spi.predicate.NullableValue) ConnectorSession(io.trino.spi.connector.ConnectorSession) TestingConnectorSession(io.trino.testing.TestingConnectorSession) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) MaterializedResult(io.trino.testing.MaterializedResult) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) Test(org.testng.annotations.Test)
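
The (long) floatToRawIntBits(87.1f) wrapping reflects how REAL values travel through the Trino SPI: as the float's raw int bits widened to a long. A quick round-trip sketch, separate from the test, to make the encoding concrete:

import static io.trino.spi.type.RealType.REAL;
import static java.lang.Float.floatToRawIntBits;
import static java.lang.Float.intBitsToFloat;

import io.trino.spi.predicate.NullableValue;

static void roundTrip() {
    // encode: float -> raw int bits -> long (the stack representation of REAL)
    NullableValue bound = NullableValue.of(REAL, (long) floatToRawIntBits(87.1f));
    // decode: long -> int bits -> float
    float value = intBitsToFloat((int) (long) bound.getValue());
    // value == 87.1f
}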

Aggregations

Types most often used together with NullableValue across these examples (count of examples using each):

NullableValue (io.trino.spi.predicate.NullableValue): 33
ColumnHandle (io.trino.spi.connector.ColumnHandle): 22
Optional (java.util.Optional): 16
ConnectorSession (io.trino.spi.connector.ConnectorSession): 15
TupleDomain (io.trino.spi.predicate.TupleDomain): 15
ImmutableList (com.google.common.collect.ImmutableList): 14
List (java.util.List): 14
Map (java.util.Map): 14
Set (java.util.Set): 13
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 12
Domain (io.trino.spi.predicate.Domain): 12
Objects.requireNonNull (java.util.Objects.requireNonNull): 12
ImmutableMap (com.google.common.collect.ImmutableMap): 11
ImmutableSet (com.google.common.collect.ImmutableSet): 10
TrinoException (io.trino.spi.TrinoException): 10
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 10
Constraint (io.trino.spi.connector.Constraint): 9
ImmutableSet.toImmutableSet (com.google.common.collect.ImmutableSet.toImmutableSet): 8
ColumnMetadata (io.trino.spi.connector.ColumnMetadata): 8
SchemaTableName (io.trino.spi.connector.SchemaTableName): 8