Use of com.facebook.presto.spi.relation.RowExpressionService in project presto by prestodb.
From the class HiveFilterPushdown, method pushdownFilter:
@VisibleForTesting
public static ConnectorPushdownFilterResult pushdownFilter(
        ConnectorSession session,
        ConnectorMetadata metadata,
        SemiTransactionalHiveMetastore metastore,
        RowExpressionService rowExpressionService,
        StandardFunctionResolution functionResolution,
        HivePartitionManager partitionManager,
        FunctionMetadataManager functionMetadataManager,
        ConnectorTableHandle tableHandle,
        RowExpression filter,
        Optional<ConnectorTableLayoutHandle> currentLayoutHandle)
{
    checkArgument(!FALSE_CONSTANT.equals(filter), "Cannot pushdown filter that is always false");
    if (TRUE_CONSTANT.equals(filter) && currentLayoutHandle.isPresent()) {
        return new ConnectorPushdownFilterResult(metadata.getTableLayout(session, currentLayoutHandle.get()), TRUE_CONSTANT);
    }

    // Split the filter into 3 groups of conjuncts:
    //  - range filters that apply to entire columns,
    //  - range filters that apply to subfields,
    //  - the rest.
    // Intersect these with possibly pre-existing filters.
    DomainTranslator.ExtractionResult<Subfield> decomposedFilter = rowExpressionService.getDomainTranslator()
            .fromPredicate(session, filter, new SubfieldExtractor(functionResolution, rowExpressionService.getExpressionOptimizer(), session).toColumnExtractor());

    if (currentLayoutHandle.isPresent()) {
        HiveTableLayoutHandle currentHiveLayout = (HiveTableLayoutHandle) currentLayoutHandle.get();
        decomposedFilter = intersectExtractionResult(
                new DomainTranslator.ExtractionResult(currentHiveLayout.getDomainPredicate(), currentHiveLayout.getRemainingPredicate()),
                decomposedFilter);
    }

    if (decomposedFilter.getTupleDomain().isNone()) {
        return new ConnectorPushdownFilterResult(EMPTY_TABLE_LAYOUT, FALSE_CONSTANT);
    }

    RowExpression optimizedRemainingExpression = rowExpressionService.getExpressionOptimizer()
            .optimize(decomposedFilter.getRemainingExpression(), OPTIMIZED, session);
    if (optimizedRemainingExpression instanceof ConstantExpression) {
        ConstantExpression constantExpression = (ConstantExpression) optimizedRemainingExpression;
        if (FALSE_CONSTANT.equals(constantExpression) || constantExpression.getValue() == null) {
            return new ConnectorPushdownFilterResult(EMPTY_TABLE_LAYOUT, FALSE_CONSTANT);
        }
    }

    Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle);
    TupleDomain<ColumnHandle> entireColumnDomain = decomposedFilter.getTupleDomain()
            .transform(subfield -> isEntireColumn(subfield) ? subfield.getRootName() : null)
            .transform(columnHandles::get);
    if (currentLayoutHandle.isPresent()) {
        entireColumnDomain = entireColumnDomain.intersect(((HiveTableLayoutHandle) (currentLayoutHandle.get())).getPartitionColumnPredicate());
    }
    Constraint<ColumnHandle> constraint = new Constraint<>(entireColumnDomain);

    // Extract deterministic conjuncts that apply to partition columns and specify these as Constraint#predicate
    if (!TRUE_CONSTANT.equals(decomposedFilter.getRemainingExpression())) {
        LogicalRowExpressions logicalRowExpressions = new LogicalRowExpressions(rowExpressionService.getDeterminismEvaluator(), functionResolution, functionMetadataManager);
        RowExpression deterministicPredicate = logicalRowExpressions.filterDeterministicConjuncts(decomposedFilter.getRemainingExpression());
        if (!TRUE_CONSTANT.equals(deterministicPredicate)) {
            ConstraintEvaluator evaluator = new ConstraintEvaluator(rowExpressionService, session, columnHandles, deterministicPredicate);
            constraint = new Constraint<>(entireColumnDomain, evaluator::isCandidate);
        }
    }

    HivePartitionResult hivePartitionResult = partitionManager.getPartitions(metastore, tableHandle, constraint, session);

    TupleDomain<Subfield> domainPredicate = withColumnDomains(ImmutableMap.<Subfield, Domain>builder()
            .putAll(hivePartitionResult.getUnenforcedConstraint()
                    .transform(HiveFilterPushdown::toSubfield)
                    .getDomains()
                    .orElse(ImmutableMap.of()))
            .putAll(decomposedFilter.getTupleDomain()
                    .transform(subfield -> !isEntireColumn(subfield) ? subfield : null)
                    .getDomains()
                    .orElse(ImmutableMap.of()))
            .build());

    Set<String> predicateColumnNames = new HashSet<>();
    domainPredicate.getDomains().get().keySet().stream()
            .map(Subfield::getRootName)
            .forEach(predicateColumnNames::add);
    // Include only columns referenced in the optimized expression. Although the expression is sent to the worker node
    // unoptimized, the worker is expected to optimize the expression before executing.
    extractAll(optimizedRemainingExpression).stream()
            .map(VariableReferenceExpression::getName)
            .forEach(predicateColumnNames::add);

    Map<String, HiveColumnHandle> predicateColumns = predicateColumnNames.stream()
            .map(columnHandles::get)
            .map(HiveColumnHandle.class::cast)
            .collect(toImmutableMap(HiveColumnHandle::getName, Functions.identity()));

    SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    LogicalRowExpressions logicalRowExpressions = new LogicalRowExpressions(rowExpressionService.getDeterminismEvaluator(), functionResolution, functionMetadataManager);
    List<RowExpression> conjuncts = extractConjuncts(decomposedFilter.getRemainingExpression());
    RowExpression dynamicFilterExpression = extractDynamicConjuncts(conjuncts, logicalRowExpressions);
    RowExpression remainingExpression = extractStaticConjuncts(conjuncts, logicalRowExpressions);
    remainingExpression = removeNestedDynamicFilters(remainingExpression);

    Table table = metastore.getTable(
            new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), getMetastoreHeaders(session), isUserDefinedTypeEncodingEnabled(session), metastore.getColumnConverterProvider()),
            tableName.getSchemaName(),
            tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    return new ConnectorPushdownFilterResult(
            metadata.getTableLayout(
                    session,
                    new HiveTableLayoutHandle(
                            tableName,
                            table.getStorage().getLocation(),
                            hivePartitionResult.getPartitionColumns(),
                            // remove comments to optimize serialization costs
                            pruneColumnComments(hivePartitionResult.getDataColumns()),
                            hivePartitionResult.getTableParameters(),
                            hivePartitionResult.getPartitions(),
                            domainPredicate,
                            remainingExpression,
                            predicateColumns,
                            hivePartitionResult.getEnforcedConstraint(),
                            hivePartitionResult.getBucketHandle(),
                            hivePartitionResult.getBucketFilter(),
                            true,
                            createTableLayoutString(session, rowExpressionService, tableName, hivePartitionResult.getBucketHandle(), hivePartitionResult.getBucketFilter(), remainingExpression, domainPredicate),
                            currentLayoutHandle.map(layout -> ((HiveTableLayoutHandle) layout).getRequestedColumns()).orElse(Optional.empty()),
                            false)),
            dynamicFilterExpression);
}
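The decomposition above leans on two TupleDomain operations from the Presto common SPI: intersect, which conjoins per-column domains, and transform, which remaps column keys and drops any key mapped to null (exactly the trick pushdownFilter uses to separate whole-column ranges from subfield ranges). The following is a minimal standalone sketch of those two calls, assuming the com.facebook.presto.common.predicate package layout of recent prestodb releases; the class name and the columns "ds" and "id" are made up for illustration:

import com.facebook.presto.common.predicate.Domain;
import com.facebook.presto.common.predicate.TupleDomain;
import com.google.common.collect.ImmutableMap;

import static com.facebook.presto.common.type.BigintType.BIGINT;

public final class TupleDomainSketch
{
    public static void main(String[] args)
    {
        // Two single-column constraints: ds = 1 and id = 2 (made-up column names).
        TupleDomain<String> left = TupleDomain.withColumnDomains(ImmutableMap.of(
                "ds", Domain.singleValue(BIGINT, 1L)));
        TupleDomain<String> right = TupleDomain.withColumnDomains(ImmutableMap.of(
                "id", Domain.singleValue(BIGINT, 2L)));

        // intersect() conjoins per-column domains, as pushdownFilter does when it
        // merges the new filter with a pre-existing layout's partition predicate.
        TupleDomain<String> both = left.intersect(right);

        // transform() remaps column keys; returning null drops the column, which is
        // how pushdownFilter keeps only whole-column (or only subfield) entries.
        TupleDomain<String> onlyDs = both.transform(name -> "ds".equals(name) ? name : null);

        System.out.println(onlyDs.getDomains().get().keySet()); // prints [ds]
    }
}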
Use of com.facebook.presto.spi.relation.RowExpressionService in project presto by prestodb.
From the class HivePageSourceProvider, method getPageSourceFromCursorProvider:
private static Optional<ConnectorPageSource> getPageSourceFromCursorProvider(
        Set<HiveRecordCursorProvider> cursorProviders,
        Configuration configuration,
        ConnectorSession session,
        Path path,
        long start,
        long length,
        long fileSize,
        Storage storage,
        TupleDomain<HiveColumnHandle> effectivePredicate,
        List<HiveColumnHandle> hiveColumns,
        DateTimeZone hiveStorageTimeZone,
        TypeManager typeManager,
        SchemaTableName tableName,
        List<HiveColumnHandle> partitionKeyColumnHandles,
        List<Column> tableDataColumns,
        Map<String, String> tableParameters,
        int partitionDataColumnCount,
        TableToPartitionMapping tableToPartitionMapping,
        boolean s3SelectPushdownEnabled,
        RowExpression remainingPredicate,
        boolean isPushdownFilterEnabled,
        RowExpressionService rowExpressionService,
        Map<String, String> customSplitInfo,
        List<HiveColumnHandle> allColumns,
        List<ColumnMapping> columnMappings,
        Set<Integer> outputIndices,
        List<ColumnMapping> regularAndInterimColumnMappings,
        Optional<BucketAdaptation> bucketAdaptation)
{
    if (!hiveColumns.isEmpty() && hiveColumns.stream().allMatch(hiveColumnHandle -> hiveColumnHandle.getColumnType() == AGGREGATED)) {
        throw new UnsupportedOperationException("Partial aggregation pushdown only supported for ORC/Parquet files. " +
                "Table " + tableName.toString() + " has file (" + path.toString() + ") of format " + storage.getStorageFormat().getOutputFormat() +
                ". Set session property hive.pushdown_partial_aggregations_into_scan=false and execute query again");
    }

    for (HiveRecordCursorProvider provider : cursorProviders) {
        // GenericHiveRecordCursor will automatically do the coercion without HiveCoercionRecordCursor
        boolean doCoercion = !(provider instanceof GenericHiveRecordCursorProvider);

        List<Column> partitionDataColumns = reconstructPartitionSchema(
                tableDataColumns,
                partitionDataColumnCount,
                tableToPartitionMapping.getPartitionSchemaDifference(),
                tableToPartitionMapping.getTableToPartitionColumns());
        Properties schema = getHiveSchema(
                storage,
                partitionDataColumns,
                tableDataColumns,
                tableParameters,
                tableName.getSchemaName(),
                tableName.getTableName(),
                partitionKeyColumnHandles.stream().map(column -> column.getName()).collect(toImmutableList()),
                partitionKeyColumnHandles.stream().map(column -> column.getHiveType()).collect(toImmutableList()));

        Optional<RecordCursor> cursor = provider.createRecordCursor(
                configuration,
                session,
                path,
                start,
                length,
                fileSize,
                schema,
                toColumnHandles(regularAndInterimColumnMappings, doCoercion),
                effectivePredicate,
                hiveStorageTimeZone,
                typeManager,
                s3SelectPushdownEnabled,
                customSplitInfo);
        if (cursor.isPresent()) {
            RecordCursor delegate = cursor.get();

            if (bucketAdaptation.isPresent()) {
                delegate = new HiveBucketAdapterRecordCursor(
                        bucketAdaptation.get().getBucketColumnIndices(),
                        bucketAdaptation.get().getBucketColumnHiveTypes(),
                        bucketAdaptation.get().getTableBucketCount(),
                        bucketAdaptation.get().getPartitionBucketCount(),
                        bucketAdaptation.get().getBucketToKeep(),
                        typeManager,
                        delegate);
            }

            // Need to wrap RcText and RcBinary into a wrapper, which will do the coercion for mismatch columns
            if (doCoercion) {
                delegate = new HiveCoercionRecordCursor(regularAndInterimColumnMappings, typeManager, delegate);
            }

            HiveRecordCursor hiveRecordCursor = new HiveRecordCursor(columnMappings, hiveStorageTimeZone, typeManager, delegate);
            List<Type> columnTypes = allColumns.stream()
                    .map(input -> typeManager.getType(input.getTypeSignature()))
                    .collect(toList());

            RecordPageSource recordPageSource = new RecordPageSource(columnTypes, hiveRecordCursor);
            if (isPushdownFilterEnabled) {
                return Optional.of(new FilteringPageSource(columnMappings, effectivePredicate, remainingPredicate, typeManager, rowExpressionService, session, outputIndices, recordPageSource));
            }
            return Optional.of(recordPageSource);
        }
    }
    return Optional.empty();
}
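Whichever branch is taken, the caller ends up with a ConnectorPageSource (a plain RecordPageSource, or a FilteringPageSource layered on top when filter pushdown is enabled) and drains it page by page. A minimal consumption sketch, assuming the com.facebook.presto.common.Page and com.facebook.presto.spi.ConnectorPageSource layout of recent prestodb releases; PageSourceDrainSketch and countPositions are hypothetical names:

import com.facebook.presto.common.Page;
import com.facebook.presto.spi.ConnectorPageSource;

import java.io.IOException;

public final class PageSourceDrainSketch
{
    // Made-up helper: drains a page source and counts rows, the same way a scan
    // operator would consume the page source returned above.
    static long countPositions(ConnectorPageSource pageSource)
            throws IOException
    {
        long positions = 0;
        try (ConnectorPageSource source = pageSource) {
            while (!source.isFinished()) {
                // getNextPage() may return null while the source is still producing.
                Page page = source.getNextPage();
                if (page != null) {
                    positions += page.getPositionCount();
                }
            }
        }
        return positions;
    }
}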