
Example 1 with HiveBucketFilter

Use of io.trino.plugin.hive.util.HiveBucketing.HiveBucketFilter in project trino by trinodb.

From the class HivePartitionManager, the method getPartitions:

public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, ConnectorTableHandle tableHandle, Constraint constraint) {
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    TupleDomain<ColumnHandle> effectivePredicate = constraint.getSummary()
            .intersect(hiveTableHandle.getEnforcedConstraint());
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Optional<HiveBucketHandle> hiveBucketHandle = hiveTableHandle.getBucketHandle();
    List<HiveColumnHandle> partitionColumns = hiveTableHandle.getPartitionColumns();
    // short circuit: an unsatisfiable predicate means no partition can match
    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(partitionColumns, Optional.empty(), ImmutableList.of(), TupleDomain.none(), TupleDomain.none(), hiveBucketHandle, Optional.empty());
    }
    // derive the set of eligible bucket ids from the predicate, if the table is bucketed
    Optional<HiveBucketFilter> bucketFilter = getHiveBucketFilter(hiveTableHandle, effectivePredicate);
    // compact the predicate so it stays small enough to carry on the table handle
    TupleDomain<HiveColumnHandle> compactEffectivePredicate = effectivePredicate
            .transformKeys(HiveColumnHandle.class::cast)
            .simplify(domainCompactionThreshold);
    // an unpartitioned table is modeled as a single synthetic partition
    if (partitionColumns.isEmpty()) {
        return new HivePartitionResult(partitionColumns, Optional.empty(), ImmutableList.of(new HivePartition(tableName)), effectivePredicate, compactEffectivePredicate, hiveBucketHandle, bucketFilter);
    }
    List<Type> partitionTypes = partitionColumns.stream()
            .map(HiveColumnHandle::getType)
            .collect(toList());
    Optional<List<String>> partitionNames = Optional.empty();
    Iterable<HivePartition> partitionsIterable;
    Predicate<Map<ColumnHandle, NullableValue>> predicate = constraint.predicate().orElse(value -> true);
    if (hiveTableHandle.getPartitions().isPresent()) {
        // partitions are already attached to the handle; filter them eagerly
        partitionsIterable = hiveTableHandle.getPartitions().get().stream()
                .filter(partition -> partitionMatches(partitionColumns, effectivePredicate, predicate, partition))
                .collect(toImmutableList());
    } else {
        // otherwise fetch matching partition names from the metastore and
        // defer parsing/filtering until the result is actually iterated
        List<String> partitionNamesList = hiveTableHandle.getPartitionNames()
                .orElseGet(() -> getFilteredPartitionNames(metastore, tableName, partitionColumns, compactEffectivePredicate));
        partitionsIterable = () -> partitionNamesList.stream()
                .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionTypes, effectivePredicate, predicate))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .iterator();
        partitionNames = Optional.of(partitionNamesList);
    }
    return new HivePartitionResult(partitionColumns, partitionNames, partitionsIterable, effectivePredicate, compactEffectivePredicate, hiveBucketHandle, bucketFilter);
}
Also used: ColumnHandle(io.trino.spi.connector.ColumnHandle) Optional(java.util.Optional) SchemaTableName(io.trino.spi.connector.SchemaTableName) HiveBucketFilter(io.trino.plugin.hive.util.HiveBucketing.HiveBucketFilter) HiveBucketing.getHiveBucketFilter(io.trino.plugin.hive.util.HiveBucketing.getHiveBucketFilter) Type(io.trino.spi.type.Type) ImmutableList(com.google.common.collect.ImmutableList) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) List(java.util.List) Collectors.toList(java.util.stream.Collectors.toList) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)
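
To see what the bucket filter buys you: for a bucketed table, a row's bucket is a deterministic function of its bucketing-column value, so an equality predicate on that column restricts the scan to the buckets its values hash into. The following is a minimal, self-contained sketch of that idea in plain Java; bucketOf and the class name are hypothetical stand-ins, not Hive's actual bucket hash (the real logic lives in io.trino.plugin.hive.util.HiveBucketing):

import java.util.Set;
import java.util.stream.Collectors;

// Conceptual sketch of bucket pruning; NOT Trino's implementation.
public class BucketFilterSketch {
    // Hypothetical stand-in for Hive's bucket hash; the real function is
    // Hive-specific and is not Object.hashCode().
    static int bucketOf(Object value, int bucketCount) {
        return Math.floorMod(value.hashCode(), bucketCount);
    }

    public static void main(String[] args) {
        int bucketCount = 8;
        // e.g. WHERE country IN ('us', 'de') on the bucketing column
        Set<Object> predicateValues = Set.of("us", "de");
        // the "bucket filter": the only bucket ids that can contain matches
        Set<Integer> eligibleBuckets = predicateValues.stream()
                .map(value -> bucketOf(value, bucketCount))
                .collect(Collectors.toSet());
        // files are laid out per bucket, so ineligible buckets are never read
        for (int bucket = 0; bucket < bucketCount; bucket++) {
            System.out.printf("bucket %d -> %s%n", bucket, eligibleBuckets.contains(bucket) ? "scan" : "skip");
        }
    }
}

Example 2 below is the consumer of the same filter: createBucketSplitInfo(bucketHandle, bucketFilter) is what lets split generation skip the ineligible bucket files.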

Example 2 with HiveBucketFilter

Use of io.trino.plugin.hive.util.HiveBucketing.HiveBucketFilter in project trino by trinodb.

From the class HiveSplitManager, the method getSplits:

@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTable.getSchemaTableName();
    // get table metadata
    SemiTransactionalHiveMetastore metastore = transactionManager.get(transaction, session.getIdentity()).getMetastore();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new TableNotFoundException(tableName));
    // verify table is not marked as non-readable
    String tableNotReadable = table.getParameters().get(OBJECT_NOT_READABLE);
    if (!isNullOrEmpty(tableNotReadable)) {
        throw new HiveNotReadableException(tableName, Optional.empty(), tableNotReadable);
    }
    // get partitions
    List<HivePartition> partitions = partitionManager.getOrLoadPartitions(metastore, hiveTable);
    // short circuit if we don't have any partitions
    if (partitions.isEmpty()) {
        if (hiveTable.isRecordScannedFiles()) {
            return new FixedSplitSource(ImmutableList.of(), ImmutableList.of());
        }
        return new FixedSplitSource(ImmutableList.of());
    }
    // get the bucket filter derived from the predicate, if any
    Optional<HiveBucketFilter> bucketFilter = hiveTable.getBucketFilter();
    // validate bucketed execution
    Optional<HiveBucketHandle> bucketHandle = hiveTable.getBucketHandle();
    if ((splitSchedulingStrategy == GROUPED_SCHEDULING) && bucketHandle.isEmpty()) {
        throw new TrinoException(GENERIC_INTERNAL_ERROR, "SchedulingPolicy is bucketed, but BucketHandle is not present");
    }
    // sort partitions
    partitions = Ordering.natural().onResultOf(HivePartition::getPartitionId).reverse().sortedCopy(partitions);
    Iterable<HivePartitionMetadata> hivePartitions = getPartitionMetadata(session, metastore, table, tableName, partitions, bucketHandle.map(HiveBucketHandle::toTableBucketProperty));
    // Only one thread per partition is usable when a table is not transactional
    int concurrency = isTransactionalTable(table.getParameters()) ? splitLoaderConcurrency : min(splitLoaderConcurrency, partitions.size());
    HiveSplitLoader hiveSplitLoader = new BackgroundHiveSplitLoader(
            table,
            hiveTable.getTransaction(),
            hivePartitions,
            hiveTable.getCompactEffectivePredicate(),
            dynamicFilter,
            getDynamicFilteringWaitTimeout(session),
            typeManager,
            createBucketSplitInfo(bucketHandle, bucketFilter),
            session,
            hdfsEnvironment,
            namenodeStats,
            directoryLister,
            executor,
            concurrency,
            recursiveDfsWalkerEnabled,
            !hiveTable.getPartitionColumns().isEmpty() && isIgnoreAbsentPartitions(session),
            isOptimizeSymlinkListing(session),
            metastore.getValidWriteIds(session, hiveTable)
                    .map(validTxnWriteIdList -> validTxnWriteIdList.getTableValidWriteIdList(table.getDatabaseName() + "." + table.getTableName())),
            hiveTable.getMaxScannedFileSize());
    HiveSplitSource splitSource;
    switch (splitSchedulingStrategy) {
        case UNGROUPED_SCHEDULING:
            splitSource = HiveSplitSource.allAtOnce(session, table.getDatabaseName(), table.getTableName(), maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, highMemorySplitSourceCounter, hiveTable.isRecordScannedFiles());
            break;
        case GROUPED_SCHEDULING:
            splitSource = HiveSplitSource.bucketed(session, table.getDatabaseName(), table.getTableName(), maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, highMemorySplitSourceCounter, hiveTable.isRecordScannedFiles());
            break;
        default:
            throw new IllegalArgumentException("Unknown splitSchedulingStrategy: " + splitSchedulingStrategy);
    }
    hiveSplitLoader.start(splitSource);
    return splitSource;
}
Also used: ConnectorSplitManager(io.trino.spi.connector.ConnectorSplitManager) Iterables.transform(com.google.common.collect.Iterables.transform) MetastoreUtil.makePartitionName(io.trino.plugin.hive.metastore.MetastoreUtil.makePartitionName) HiveBucketFilter(io.trino.plugin.hive.util.HiveBucketing.HiveBucketFilter) HiveSessionProperties.isOptimizeSymlinkListing(io.trino.plugin.hive.HiveSessionProperties.isOptimizeSymlinkListing) FixedSplitSource(io.trino.spi.connector.FixedSplitSource) HiveSessionProperties.isIgnoreAbsentPartitions(io.trino.plugin.hive.HiveSessionProperties.isIgnoreAbsentPartitions) HiveSessionProperties.isUseParquetColumnNames(io.trino.plugin.hive.HiveSessionProperties.isUseParquetColumnNames) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) TableNotFoundException(io.trino.spi.connector.TableNotFoundException) Column(io.trino.plugin.hive.metastore.Column) BoundedExecutor(io.airlift.concurrent.BoundedExecutor) HIVE_PARTITION_SCHEMA_MISMATCH(io.trino.plugin.hive.HiveErrorCode.HIVE_PARTITION_SCHEMA_MISMATCH) Iterables.concat(com.google.common.collect.Iterables.concat) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) Map(java.util.Map) ENGLISH(java.util.Locale.ENGLISH) Table(io.trino.plugin.hive.metastore.Table) ImmutableMap(com.google.common.collect.ImmutableMap) TableToPartitionMapping.mapColumnsByIndex(io.trino.plugin.hive.TableToPartitionMapping.mapColumnsByIndex) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) TrinoException(io.trino.spi.TrinoException) Math.min(java.lang.Math.min) SchemaTableName(io.trino.spi.connector.SchemaTableName) String.format(java.lang.String.format) DataSize(io.airlift.units.DataSize) List(java.util.List) MetastoreUtil.verifyOnline(io.trino.plugin.hive.metastore.MetastoreUtil.verifyOnline) VersionEmbedder(io.trino.spi.VersionEmbedder) DynamicFilter(io.trino.spi.connector.DynamicFilter) MetastoreUtil.getProtectMode(io.trino.plugin.hive.metastore.MetastoreUtil.getProtectMode) HiveStorageFormat.getHiveStorageFormat(io.trino.plugin.hive.HiveStorageFormat.getHiveStorageFormat) SERVER_SHUTTING_DOWN(io.trino.spi.StandardErrorCode.SERVER_SHUTTING_DOWN) AcidUtils.isTransactionalTable(org.apache.hadoop.hive.ql.io.AcidUtils.isTransactionalTable) Optional(java.util.Optional) MoreObjects.firstNonNull(com.google.common.base.MoreObjects.firstNonNull) HiveCoercionPolicy.canCoerce(io.trino.plugin.hive.util.HiveCoercionPolicy.canCoerce) Partition(io.trino.plugin.hive.metastore.Partition) HiveUtil(io.trino.plugin.hive.util.HiveUtil) Nested(org.weakref.jmx.Nested) BucketSplitInfo.createBucketSplitInfo(io.trino.plugin.hive.BackgroundHiveSplitLoader.BucketSplitInfo.createBucketSplitInfo) Strings.isNullOrEmpty(com.google.common.base.Strings.isNullOrEmpty) CounterStat(io.airlift.stats.CounterStat) UNPARTITIONED_ID(io.trino.plugin.hive.HivePartition.UNPARTITIONED_ID) HiveSessionProperties.isUseOrcColumnNames(io.trino.plugin.hive.HiveSessionProperties.isUseOrcColumnNames) HIVE_PARTITION_DROPPED_DURING_QUERY(io.trino.plugin.hive.HiveErrorCode.HIVE_PARTITION_DROPPED_DURING_QUERY) Inject(javax.inject.Inject) GROUPED_SCHEDULING(io.trino.spi.connector.ConnectorSplitManager.SplitSchedulingStrategy.GROUPED_SCHEDULING) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) HIVE_INVALID_METADATA(io.trino.plugin.hive.HiveErrorCode.HIVE_INVALID_METADATA) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) Managed(org.weakref.jmx.Managed) SemiTransactionalHiveMetastore(io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore) Objects.requireNonNull(java.util.Objects.requireNonNull) Nullable(javax.annotation.Nullable) ExecutorService(java.util.concurrent.ExecutorService) Iterator(java.util.Iterator) Executor(java.util.concurrent.Executor) HiveSessionProperties.isPropagateTableScanSortingProperties(io.trino.plugin.hive.HiveSessionProperties.isPropagateTableScanSortingProperties) AbstractIterator(com.google.common.collect.AbstractIterator) ConnectorSplitSource(io.trino.spi.connector.ConnectorSplitSource) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) ConnectorSession(io.trino.spi.connector.ConnectorSession) GENERIC_INTERNAL_ERROR(io.trino.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR) Ordering(com.google.common.collect.Ordering) HiveSessionProperties.getDynamicFilteringWaitTimeout(io.trino.plugin.hive.HiveSessionProperties.getDynamicFilteringWaitTimeout) SortingColumn(io.trino.plugin.hive.metastore.SortingColumn) TypeManager(io.trino.spi.type.TypeManager) ConnectorTransactionHandle(io.trino.spi.connector.ConnectorTransactionHandle)
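
The key design point in getSplits is that split discovery runs in the background: hiveSplitLoader.start(splitSource) begins enumerating files asynchronously while the engine drains splits from the returned source, so scheduling overlaps with listing instead of blocking until all splits exist. Below is a minimal sketch of that producer/consumer pattern in plain Java; the class name, queue bound, and "split" strings are hypothetical stand-ins, not Trino's BackgroundHiveSplitLoader or HiveSplitSource:

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Minimal sketch of background split loading; NOT Trino's implementation.
public class BackgroundLoaderSketch {
    public static void main(String[] args) throws InterruptedException {
        // stands in for the partitions returned by the partition manager
        List<String> partitions = List.of("ds=2021-01-01", "ds=2021-01-02", "ds=2021-01-03");

        // bounded queue: backpressure on the loader, like maxOutstandingSplits
        BlockingQueue<String> splits = new LinkedBlockingQueue<>(100);
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // the loader role: enumerate "files" per partition in the background
        executor.submit(() -> {
            for (String partition : partitions) {
                for (int file = 0; file < 2; file++) {
                    splits.put(partition + "/file-" + file); // blocks when the queue is full
                }
            }
            return null;
        });

        // the split-source role: the engine polls splits as they become available
        String split;
        while ((split = splits.poll(1, TimeUnit.SECONDS)) != null) {
            System.out.println("scheduling " + split);
        }
        executor.shutdown();
    }
}

In the real code the same shape is parameterized per scheduling strategy: HiveSplitSource.allAtOnce for UNGROUPED_SCHEDULING and HiveSplitSource.bucketed for GROUPED_SCHEDULING, with the bucket filter folded in through createBucketSplitInfo.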

Aggregations

ImmutableList (com.google.common.collect.ImmutableList) 2
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList) 2
ImmutableMap (com.google.common.collect.ImmutableMap) 2
HiveBucketFilter (io.trino.plugin.hive.util.HiveBucketing.HiveBucketFilter) 2
MoreObjects.firstNonNull (com.google.common.base.MoreObjects.firstNonNull) 1
Preconditions.checkArgument (com.google.common.base.Preconditions.checkArgument) 1
Strings.isNullOrEmpty (com.google.common.base.Strings.isNullOrEmpty) 1
AbstractIterator (com.google.common.collect.AbstractIterator) 1
Iterables.concat (com.google.common.collect.Iterables.concat) 1
Iterables.getOnlyElement (com.google.common.collect.Iterables.getOnlyElement) 1
Iterables.transform (com.google.common.collect.Iterables.transform) 1
Lists (com.google.common.collect.Lists) 1
Ordering (com.google.common.collect.Ordering) 1
BoundedExecutor (io.airlift.concurrent.BoundedExecutor) 1
CounterStat (io.airlift.stats.CounterStat) 1
DataSize (io.airlift.units.DataSize) 1
BucketSplitInfo.createBucketSplitInfo (io.trino.plugin.hive.BackgroundHiveSplitLoader.BucketSplitInfo.createBucketSplitInfo) 1
HIVE_INVALID_METADATA (io.trino.plugin.hive.HiveErrorCode.HIVE_INVALID_METADATA) 1
HIVE_PARTITION_DROPPED_DURING_QUERY (io.trino.plugin.hive.HiveErrorCode.HIVE_PARTITION_DROPPED_DURING_QUERY) 1
HIVE_PARTITION_SCHEMA_MISMATCH (io.trino.plugin.hive.HiveErrorCode.HIVE_PARTITION_SCHEMA_MISMATCH) 1