Example 66 with Domain

Use of io.trino.spi.predicate.Domain in project trino by trinodb.

From the class TestDynamicFilterService, method testDynamicFilterSummaryCompletion:

@Test
public void testDynamicFilterSummaryCompletion() {
    DynamicFilterService dynamicFilterService = createDynamicFilterService();
    DynamicFilterId filterId = new DynamicFilterId("df");
    QueryId queryId = new QueryId("query");
    StageId stageId = new StageId(queryId, 0);
    dynamicFilterService.registerQuery(queryId, session, ImmutableSet.of(filterId), ImmutableSet.of(filterId), ImmutableSet.of());
    dynamicFilterService.stageCannotScheduleMoreTasks(stageId, 0, 3);
    assertFalse(dynamicFilterService.getSummary(queryId, filterId).isPresent());
    // assert initial dynamic filtering stats
    DynamicFiltersStats stats = dynamicFilterService.getDynamicFilteringStats(queryId, session);
    assertEquals(stats.getTotalDynamicFilters(), 1);
    assertEquals(stats.getDynamicFiltersCompleted(), 0);
    assertEquals(stats.getLazyDynamicFilters(), 1);
    assertEquals(stats.getReplicatedDynamicFilters(), 0);
    dynamicFilterService.addTaskDynamicFilters(new TaskId(stageId, 0, 0), ImmutableMap.of(filterId, singleValue(INTEGER, 1L)));
    assertFalse(dynamicFilterService.getSummary(queryId, filterId).isPresent());
    stats = dynamicFilterService.getDynamicFilteringStats(queryId, session);
    assertEquals(stats.getDynamicFiltersCompleted(), 0);
    dynamicFilterService.addTaskDynamicFilters(new TaskId(stageId, 1, 0), ImmutableMap.of(filterId, singleValue(INTEGER, 2L)));
    assertFalse(dynamicFilterService.getSummary(queryId, filterId).isPresent());
    stats = dynamicFilterService.getDynamicFilteringStats(queryId, session);
    assertEquals(stats.getDynamicFiltersCompleted(), 0);
    dynamicFilterService.addTaskDynamicFilters(new TaskId(stageId, 2, 0), ImmutableMap.of(filterId, singleValue(INTEGER, 3L)));
    Optional<Domain> summary = dynamicFilterService.getSummary(queryId, filterId);
    assertTrue(summary.isPresent());
    assertEquals(summary.get(), multipleValues(INTEGER, ImmutableList.of(1L, 2L, 3L)));
    stats = dynamicFilterService.getDynamicFilteringStats(queryId, session);
    assertEquals(stats.getDynamicFiltersCompleted(), 1);
    assertEquals(stats.getLazyDynamicFilters(), 1);
    assertEquals(stats.getReplicatedDynamicFilters(), 0);
    assertEquals(stats.getDynamicFilterDomainStats(), ImmutableList.of(new DynamicFilterDomainStats(filterId, getSimplifiedDomainString(1L, 3L, 3, INTEGER))));
}
Also used : TaskId(io.trino.execution.TaskId) DynamicFilterDomainStats(io.trino.server.DynamicFilterService.DynamicFilterDomainStats) QueryId(io.trino.spi.QueryId) StageId(io.trino.execution.StageId) DynamicFiltersStats(io.trino.server.DynamicFilterService.DynamicFiltersStats) Domain(io.trino.spi.predicate.Domain) TupleDomain(io.trino.spi.predicate.TupleDomain) DynamicFilterId(io.trino.sql.planner.plan.DynamicFilterId) Test(org.testng.annotations.Test)
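
A minimal sketch of the domain algebra behind this test, assuming only the public trino-spi API (the class name DomainUnionSketch is ours, and whether the service literally performs pairwise unions is an implementation detail): each task reports a single-value domain, and the completed summary equals multipleValues over all reported values.

import static io.trino.spi.type.IntegerType.INTEGER;

import com.google.common.collect.ImmutableList;
import io.trino.spi.predicate.Domain;

public class DomainUnionSketch {
    public static void main(String[] args) {
        // One single-value domain per task, as in the addTaskDynamicFilters calls above
        Domain task0 = Domain.singleValue(INTEGER, 1L);
        Domain task1 = Domain.singleValue(INTEGER, 2L);
        Domain task2 = Domain.singleValue(INTEGER, 3L);
        // The union of the per-task domains is the summary the test asserts
        Domain summary = task0.union(task1).union(task2);
        System.out.println(summary.equals(Domain.multipleValues(INTEGER, ImmutableList.of(1L, 2L, 3L)))); // true
    }
}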

Example 67 with Domain

Use of io.trino.spi.predicate.Domain in project trino by trinodb.

From the class DeltaLakeSplitManager, method getSplits:

private Stream<DeltaLakeSplit> getSplits(ConnectorTransactionHandle transaction, DeltaLakeTableHandle tableHandle, ConnectorSession session, Optional<DataSize> maxScannedFileSize, Set<ColumnHandle> columnsCoveredByDynamicFilter, Constraint constraint) {
    DeltaLakeMetastore metastore = getMetastore(session, transaction);
    String tableLocation = metastore.getTableLocation(tableHandle.getSchemaTableName(), session);
    List<AddFileEntry> validDataFiles = metastore.getValidDataFiles(tableHandle.getSchemaTableName(), session);
    TupleDomain<DeltaLakeColumnHandle> enforcedPartitionConstraint = tableHandle.getEnforcedPartitionConstraint();
    TupleDomain<DeltaLakeColumnHandle> nonPartitionConstraint = tableHandle.getNonPartitionConstraint();
    // Delta Lake handles updates and deletes by copying entire data files, minus updates/deletes. Because of this we can only have one Split/UpdatablePageSource
    // per file.
    boolean splittable = tableHandle.getWriteType().isEmpty();
    AtomicInteger remainingInitialSplits = new AtomicInteger(maxInitialSplits);
    Optional<Instant> filesModifiedAfter = tableHandle.getAnalyzeHandle().flatMap(AnalyzeHandle::getFilesModifiedAfter);
    Optional<Long> maxScannedFileSizeInBytes = maxScannedFileSize.map(DataSize::toBytes);
    Set<String> predicatedColumnNames = Stream.concat(
            nonPartitionConstraint.getDomains().orElseThrow().keySet().stream(),
            columnsCoveredByDynamicFilter.stream().map(DeltaLakeColumnHandle.class::cast))
            // TODO is DeltaLakeColumnHandle.name normalized?
            .map(column -> column.getName().toLowerCase(ENGLISH))
            .collect(toImmutableSet());
    List<ColumnMetadata> schema = extractSchema(tableHandle.getMetadataEntry(), typeManager);
    List<ColumnMetadata> predicatedColumns = schema.stream()
            // ColumnMetadata.name is lowercase
            .filter(column -> predicatedColumnNames.contains(column.getName()))
            .collect(toImmutableList());
    return validDataFiles.stream().flatMap(addAction -> {
        if (tableHandle.getAnalyzeHandle().isPresent() && !tableHandle.getAnalyzeHandle().get().isInitialAnalyze() && !addAction.isDataChange()) {
            // skip files which do not introduce data change on non-initial ANALYZE
            return Stream.empty();
        }
        if (filesModifiedAfter.isPresent() && addAction.getModificationTime() <= filesModifiedAfter.get().toEpochMilli()) {
            return Stream.empty();
        }
        if (maxScannedFileSizeInBytes.isPresent() && addAction.getSize() > maxScannedFileSizeInBytes.get()) {
            return Stream.empty();
        }
        Map<DeltaLakeColumnHandle, Domain> enforcedDomains = enforcedPartitionConstraint.getDomains().orElseThrow();
        if (!partitionMatchesPredicate(addAction.getCanonicalPartitionValues(), enforcedDomains)) {
            return Stream.empty();
        }
        TupleDomain<DeltaLakeColumnHandle> statisticsPredicate = createStatisticsPredicate(addAction, predicatedColumns, tableHandle.getMetadataEntry().getCanonicalPartitionColumns());
        if (!nonPartitionConstraint.overlaps(statisticsPredicate)) {
            return Stream.empty();
        }
        if (constraint.predicate().isPresent()) {
            Map<String, Optional<String>> partitionValues = addAction.getCanonicalPartitionValues();
            Map<ColumnHandle, NullableValue> deserializedValues = constraint.getPredicateColumns().orElseThrow().stream()
                    .filter(column -> column instanceof DeltaLakeColumnHandle)
                    .filter(column -> partitionValues.containsKey(((DeltaLakeColumnHandle) column).getName()))
                    .collect(toImmutableMap(identity(), column -> {
                        DeltaLakeColumnHandle deltaLakeColumn = (DeltaLakeColumnHandle) column;
                        return NullableValue.of(
                                deltaLakeColumn.getType(),
                                deserializePartitionValue(deltaLakeColumn, addAction.getCanonicalPartitionValues().get(deltaLakeColumn.getName())));
                    }));
            if (!constraint.predicate().get().test(deserializedValues)) {
                return Stream.empty();
            }
        }
        return splitsForFile(session, addAction, tableLocation, addAction.getCanonicalPartitionValues(), statisticsPredicate, splittable, remainingInitialSplits).stream();
    });
}
Also used : ConnectorSplitManager(io.trino.spi.connector.ConnectorSplitManager) Constraint(io.trino.spi.connector.Constraint) DeltaLakeSessionProperties.getMaxInitialSplitSize(io.trino.plugin.deltalake.DeltaLakeSessionProperties.getMaxInitialSplitSize) URLDecoder(java.net.URLDecoder) NullableValue(io.trino.spi.predicate.NullableValue) ColumnMetadata(io.trino.spi.connector.ColumnMetadata) BiFunction(java.util.function.BiFunction) DeltaLakeSchemaSupport.extractSchema(io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport.extractSchema) AddFileEntry(io.trino.plugin.deltalake.transactionlog.AddFileEntry) FixedSplitSource(io.trino.spi.connector.FixedSplitSource) Inject(javax.inject.Inject) DeltaLakeMetastore(io.trino.plugin.deltalake.metastore.DeltaLakeMetastore) DeltaLakeMetadata.createStatisticsPredicate(io.trino.plugin.deltalake.DeltaLakeMetadata.createStatisticsPredicate) ImmutableList(com.google.common.collect.ImmutableList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) Map(java.util.Map) Objects.requireNonNull(java.util.Objects.requireNonNull) ColumnHandle(io.trino.spi.connector.ColumnHandle) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) ENGLISH(java.util.Locale.ENGLISH) ExecutorService(java.util.concurrent.ExecutorService) UTF_8(java.nio.charset.StandardCharsets.UTF_8) Domain(io.trino.spi.predicate.Domain) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) Set(java.util.Set) ConnectorSplitSource(io.trino.spi.connector.ConnectorSplitSource) ConnectorSession(io.trino.spi.connector.ConnectorSession) TupleDomain(io.trino.spi.predicate.TupleDomain) Instant(java.time.Instant) DeltaLakeSessionProperties.getMaxSplitSize(io.trino.plugin.deltalake.DeltaLakeSessionProperties.getMaxSplitSize) DataSize(io.airlift.units.DataSize) List(java.util.List) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) ClassLoaderSafeConnectorSplitSource(io.trino.plugin.base.classloader.ClassLoaderSafeConnectorSplitSource) Stream(java.util.stream.Stream) DynamicFilter(io.trino.spi.connector.DynamicFilter) Function.identity(java.util.function.Function.identity) Optional(java.util.Optional) TypeManager(io.trino.spi.type.TypeManager) TransactionLogParser.deserializePartitionValue(io.trino.plugin.deltalake.transactionlog.TransactionLogParser.deserializePartitionValue) HiveTransactionHandle(io.trino.plugin.hive.HiveTransactionHandle) ConnectorTransactionHandle(io.trino.spi.connector.ConnectorTransactionHandle)
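
The nonPartitionConstraint.overlaps(statisticsPredicate) check above is what prunes files by their min/max statistics. A self-contained sketch of that check, using plain String column keys in place of DeltaLakeColumnHandle (the class name, column name, and values are ours):

import static io.trino.spi.type.BigintType.BIGINT;

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.predicate.ValueSet;

public class StatisticsPruningSketch {
    public static void main(String[] args) {
        // Query predicate: x > 100
        TupleDomain<String> constraint = TupleDomain.withColumnDomains(ImmutableMap.of(
                "x", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 100L)), false)));
        // Per-file statistics: x in [1, 50]
        TupleDomain<String> fileStatistics = TupleDomain.withColumnDomains(ImmutableMap.of(
                "x", Domain.create(ValueSet.ofRanges(Range.range(BIGINT, 1L, true, 50L, true)), false)));
        // No overlap: no row in the file can satisfy the predicate, so the file is skipped
        System.out.println(constraint.overlaps(fileStatistics)); // false
    }
}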

Example 68 with Domain

Use of io.trino.spi.predicate.Domain in project trino by trinodb.

From the class DeltaLakeSplitManager, method partitionMatchesPredicate:

public static boolean partitionMatchesPredicate(Map<String, Optional<String>> partitionKeys, Map<DeltaLakeColumnHandle, Domain> domains) {
    for (Map.Entry<DeltaLakeColumnHandle, Domain> enforcedDomainsEntry : domains.entrySet()) {
        DeltaLakeColumnHandle partitionColumn = enforcedDomainsEntry.getKey();
        Domain partitionDomain = enforcedDomainsEntry.getValue();
        if (!partitionDomain.includesNullableValue(deserializePartitionValue(partitionColumn, partitionKeys.get(partitionColumn.getName())))) {
            return false;
        }
    }
    return true;
}
Also used : Domain(io.trino.spi.predicate.Domain) TupleDomain(io.trino.spi.predicate.TupleDomain) Map(java.util.Map) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap)
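
Domain.includesNullableValue is what makes null partition values work here: deserializePartitionValue yields null for a missing value, and the domain decides whether null is admissible. A small sketch against VARCHAR (the class name and values are ours; trino-spi's VARCHAR uses io.airlift.slice Slices as its native representation):

import static io.trino.spi.type.VarcharType.VARCHAR;

import io.airlift.slice.Slices;
import io.trino.spi.predicate.Domain;

public class PartitionPruningSketch {
    public static void main(String[] args) {
        // Enforced domain: partition column = '2021-01-01'
        Domain enforced = Domain.singleValue(VARCHAR, Slices.utf8Slice("2021-01-01"));
        System.out.println(enforced.includesNullableValue(Slices.utf8Slice("2021-01-01"))); // true: keep partition
        System.out.println(enforced.includesNullableValue(Slices.utf8Slice("2021-01-02"))); // false: prune partition
        System.out.println(enforced.includesNullableValue(null)); // false: a singleValue domain does not allow null
    }
}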

Example 69 with Domain

Use of io.trino.spi.predicate.Domain in project trino by trinodb.

From the class ParquetPageSourceFactory, method getParquetTupleDomain:

public static TupleDomain<ColumnDescriptor> getParquetTupleDomain(Map<List<String>, RichColumnDescriptor> descriptorsByPath, TupleDomain<HiveColumnHandle> effectivePredicate, MessageType fileSchema, boolean useColumnNames) {
    if (effectivePredicate.isNone()) {
        return TupleDomain.none();
    }
    ImmutableMap.Builder<ColumnDescriptor, Domain> predicate = ImmutableMap.builder();
    for (Entry<HiveColumnHandle, Domain> entry : effectivePredicate.getDomains().get().entrySet()) {
        HiveColumnHandle columnHandle = entry.getKey();
        // skip looking up predicates for complex types as Parquet only stores stats for primitives
        if (columnHandle.getHiveType().getCategory() != PRIMITIVE || columnHandle.getColumnType() != REGULAR) {
            continue;
        }
        RichColumnDescriptor descriptor;
        if (useColumnNames) {
            descriptor = descriptorsByPath.get(ImmutableList.of(columnHandle.getName()));
        } else {
            org.apache.parquet.schema.Type parquetField = getParquetType(columnHandle, fileSchema, false);
            if (parquetField == null || !parquetField.isPrimitive()) {
                // the column is absent from the file schema, or it is a complex type
                continue;
            }
            descriptor = descriptorsByPath.get(ImmutableList.of(parquetField.getName()));
        }
        if (descriptor != null) {
            predicate.put(descriptor, entry.getValue());
        }
    }
    return TupleDomain.withColumnDomains(predicate.buildOrThrow());
}
Also used : RichColumnDescriptor(io.trino.parquet.RichColumnDescriptor) ColumnDescriptor(org.apache.parquet.column.ColumnDescriptor) Domain(io.trino.spi.predicate.Domain) TupleDomain(io.trino.spi.predicate.TupleDomain) ImmutableMap(com.google.common.collect.ImmutableMap) HiveColumnHandle(io.trino.plugin.hive.HiveColumnHandle)
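
The shape of getParquetTupleDomain, iterating the domains, re-keying each entry, and silently dropping columns that cannot be mapped, generalizes to any handle translation. A hedged sketch of that pattern with generic key types (remap and TupleDomainRemapSketch are our names, not Trino API):

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import java.util.Map;

public final class TupleDomainRemapSketch {
    private TupleDomainRemapSketch() {}

    // Re-key a TupleDomain, dropping columns without a mapping, just as
    // getParquetTupleDomain drops complex-typed columns and columns missing
    // from the file schema
    public static <A, B> TupleDomain<B> remap(TupleDomain<A> predicate, Map<A, B> mapping) {
        if (predicate.isNone()) {
            return TupleDomain.none();
        }
        ImmutableMap.Builder<B, Domain> builder = ImmutableMap.builder();
        for (Map.Entry<A, Domain> entry : predicate.getDomains().orElseThrow().entrySet()) {
            B target = mapping.get(entry.getKey());
            if (target != null) {
                builder.put(target, entry.getValue());
            }
        }
        return TupleDomain.withColumnDomains(builder.buildOrThrow());
    }
}

Dropping an entry only widens the predicate, which is safe in this context because the resulting TupleDomain is used to skip data, not to enforce correctness.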

Example 70 with Domain

Use of io.trino.spi.predicate.Domain in project trino by trinodb.

From the class ParquetPageSourceFactory, method getColumnIndexStore:

private static Optional<ColumnIndexStore> getColumnIndexStore(ParquetDataSource dataSource, BlockMetaData blockMetadata, Map<List<String>, RichColumnDescriptor> descriptorsByPath, TupleDomain<ColumnDescriptor> parquetTupleDomain, ParquetReaderOptions options) {
    if (!options.isUseColumnIndex() || parquetTupleDomain.isAll() || parquetTupleDomain.isNone()) {
        return Optional.empty();
    }
    boolean hasColumnIndex = false;
    for (ColumnChunkMetaData column : blockMetadata.getColumns()) {
        if (column.getColumnIndexReference() != null && column.getOffsetIndexReference() != null) {
            hasColumnIndex = true;
            break;
        }
    }
    if (!hasColumnIndex) {
        return Optional.empty();
    }
    Set<ColumnPath> columnsReadPaths = new HashSet<>(descriptorsByPath.size());
    for (List<String> path : descriptorsByPath.keySet()) {
        columnsReadPaths.add(ColumnPath.get(path.toArray(new String[0])));
    }
    Map<ColumnDescriptor, Domain> parquetDomains = parquetTupleDomain.getDomains().orElseThrow(() -> new IllegalStateException("Predicate other than none should have domains"));
    Set<ColumnPath> columnsFilteredPaths = parquetDomains.keySet().stream().map(column -> ColumnPath.get(column.getPath())).collect(toImmutableSet());
    return Optional.of(new TrinoColumnIndexStore(dataSource, blockMetadata, columnsReadPaths, columnsFilteredPaths));
}
Also used : DateTimeZone(org.joda.time.DateTimeZone) HIVE_MISSING_DATA(io.trino.plugin.hive.HiveErrorCode.HIVE_MISSING_DATA) FileSystem(org.apache.hadoop.fs.FileSystem) HIVE_CANNOT_OPEN_SPLIT(io.trino.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT) MetadataReader(io.trino.parquet.reader.MetadataReader) HiveSessionProperties.isParquetUseColumnIndex(io.trino.plugin.hive.HiveSessionProperties.isParquetUseColumnIndex) BlockMissingException(org.apache.hadoop.hdfs.BlockMissingException) ParquetDataSourceId(io.trino.parquet.ParquetDataSourceId) HiveSessionProperties.isUseParquetColumnNames(io.trino.plugin.hive.HiveSessionProperties.isUseParquetColumnNames) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) HiveParquetColumnIOConverter.constructField(io.trino.plugin.hive.parquet.HiveParquetColumnIOConverter.constructField) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) HiveUtil.getDeserializerClassName(io.trino.plugin.hive.util.HiveUtil.getDeserializerClassName) ConnectorPageSource(io.trino.spi.connector.ConnectorPageSource) HiveColumnHandle(io.trino.plugin.hive.HiveColumnHandle) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) AcidTransaction(io.trino.plugin.hive.acid.AcidTransaction) ImmutableSet(com.google.common.collect.ImmutableSet) FileFormatDataSourceStats(io.trino.plugin.hive.FileFormatDataSourceStats) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) ConnectorIdentity(io.trino.spi.security.ConnectorIdentity) GroupType(org.apache.parquet.schema.GroupType) ImmutableMap(com.google.common.collect.ImmutableMap) Domain(io.trino.spi.predicate.Domain) ParquetReader(io.trino.parquet.reader.ParquetReader) ReaderColumns(io.trino.plugin.hive.ReaderColumns) Set(java.util.Set) TrinoException(io.trino.spi.TrinoException) ParquetTypeUtils.getColumnIO(io.trino.parquet.ParquetTypeUtils.getColumnIO) ColumnIndexStore(org.apache.parquet.internal.filter2.columnindex.ColumnIndexStore) ColumnChunkMetaData(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData) FileNotFoundException(java.io.FileNotFoundException) String.format(java.lang.String.format) ReaderPageSource(io.trino.plugin.hive.ReaderPageSource) MessageType(org.apache.parquet.schema.MessageType) List(java.util.List) HiveSessionProperties.getParquetMaxReadBlockSize(io.trino.plugin.hive.HiveSessionProperties.getParquetMaxReadBlockSize) BIGINT(io.trino.spi.type.BigintType.BIGINT) ColumnDescriptor(org.apache.parquet.column.ColumnDescriptor) BlockMetaData(org.apache.parquet.hadoop.metadata.BlockMetaData) Entry(java.util.Map.Entry) Optional(java.util.Optional) HivePageSourceFactory(io.trino.plugin.hive.HivePageSourceFactory) ParquetCorruptionException(io.trino.parquet.ParquetCorruptionException) MessageColumnIO(org.apache.parquet.io.MessageColumnIO) ColumnPath(org.apache.parquet.hadoop.metadata.ColumnPath) Strings.nullToEmpty(com.google.common.base.Strings.nullToEmpty) AggregatedMemoryContext.newSimpleAggregatedMemoryContext(io.trino.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext) Type(io.trino.spi.type.Type) OptionalInt(java.util.OptionalInt) HiveSessionProperties.isParquetIgnoreStatistics(io.trino.plugin.hive.HiveSessionProperties.isParquetIgnoreStatistics) Collectors.toUnmodifiableList(java.util.stream.Collectors.toUnmodifiableList) Inject(javax.inject.Inject) HashSet(java.util.HashSet) HiveType(io.trino.plugin.hive.HiveType) ParquetTypeUtils.lookupColumnByName(io.trino.parquet.ParquetTypeUtils.lookupColumnByName) ImmutableList(com.google.common.collect.ImmutableList) Objects.requireNonNull(java.util.Objects.requireNonNull) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) RichColumnDescriptor(io.trino.parquet.RichColumnDescriptor) ParquetTypeUtils.getParquetTypeByName(io.trino.parquet.ParquetTypeUtils.getParquetTypeByName) ParquetReaderOptions(io.trino.parquet.ParquetReaderOptions) Predicate(io.trino.parquet.predicate.Predicate) HIVE_BAD_DATA(io.trino.plugin.hive.HiveErrorCode.HIVE_BAD_DATA) HivePageSourceProvider.projectSufficientColumns(io.trino.plugin.hive.HivePageSourceProvider.projectSufficientColumns) Properties(java.util.Properties) TrinoColumnIndexStore(io.trino.parquet.reader.TrinoColumnIndexStore) PredicateUtils.predicateMatches(io.trino.parquet.predicate.PredicateUtils.predicateMatches) IOException(java.io.IOException) ConnectorSession(io.trino.spi.connector.ConnectorSession) TupleDomain(io.trino.spi.predicate.TupleDomain) PRIMITIVE(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category.PRIMITIVE) AcidInfo(io.trino.plugin.hive.AcidInfo) ParquetTypeUtils.getDescriptors(io.trino.parquet.ParquetTypeUtils.getDescriptors) HivePageSourceProvider.projectBaseColumns(io.trino.plugin.hive.HivePageSourceProvider.projectBaseColumns) Field(io.trino.parquet.Field) ParquetDataSource(io.trino.parquet.ParquetDataSource) FileMetaData(org.apache.parquet.hadoop.metadata.FileMetaData) ParquetMetadata(org.apache.parquet.hadoop.metadata.ParquetMetadata) PredicateUtils.buildPredicate(io.trino.parquet.predicate.PredicateUtils.buildPredicate) HiveConfig(io.trino.plugin.hive.HiveConfig) REGULAR(io.trino.plugin.hive.HiveColumnHandle.ColumnType.REGULAR)
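
The columnsFilteredPaths computation above maps each ColumnDescriptor to its ColumnPath via ColumnDescriptor.getPath(), which returns the path segments of a possibly nested column. A tiny sketch of ColumnPath itself, assuming only parquet-common on the classpath (the class name is ours):

import org.apache.parquet.hadoop.metadata.ColumnPath;

public class ColumnPathSketch {
    public static void main(String[] args) {
        // ColumnPath.get takes the path segments of a (possibly nested) column
        ColumnPath top = ColumnPath.get("id");
        ColumnPath nested = ColumnPath.get("address", "zip");
        System.out.println(top.toDotString()); // id
        System.out.println(nested.toDotString()); // address.zip
    }
}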

Aggregations

Domain (io.trino.spi.predicate.Domain): 120 usages
TupleDomain (io.trino.spi.predicate.TupleDomain): 107 usages
ColumnHandle (io.trino.spi.connector.ColumnHandle): 51 usages
Test (org.testng.annotations.Test): 45 usages
Map (java.util.Map): 38 usages
ImmutableList (com.google.common.collect.ImmutableList): 36 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 33 usages
List (java.util.List): 27 usages
Optional (java.util.Optional): 25 usages
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 23 usages
Type (io.trino.spi.type.Type): 23 usages
ConnectorSession (io.trino.spi.connector.ConnectorSession): 21 usages
SchemaTableName (io.trino.spi.connector.SchemaTableName): 21 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 21 usages
Set (java.util.Set): 20 usages
Range (io.trino.spi.predicate.Range): 19 usages
ValueSet (io.trino.spi.predicate.ValueSet): 18 usages
Constraint (io.trino.spi.connector.Constraint): 17 usages
String.format (java.lang.String.format): 17 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 16 usages