Use of io.trino.spi.predicate.TupleDomain in project trino by trinodb.
The class InformationSchemaMetadata, method calculatePrefixesWithTableName.
private Set<QualifiedTablePrefix> calculatePrefixesWithTableName(
        InformationSchemaTable informationSchemaTable,
        ConnectorSession connectorSession,
        Set<QualifiedTablePrefix> prefixes,
        TupleDomain<ColumnHandle> constraint,
        Optional<Predicate<Map<ColumnHandle, NullableValue>>> predicate)
{
    Session session = ((FullConnectorSession) connectorSession).getSession();
    Optional<Set<String>> tables = filterString(constraint, TABLE_NAME_COLUMN_HANDLE);
    if (tables.isPresent()) {
        return prefixes.stream()
                .peek(prefix -> verify(prefix.asQualifiedObjectName().isEmpty()))
                .flatMap(prefix -> prefix.getSchemaName()
                        .map(schemaName -> Stream.of(prefix))
                        .orElseGet(() -> listSchemaNames(session)))
                .flatMap(prefix -> tables.get().stream()
                        .filter(this::isLowerCase)
                        .map(table -> new QualifiedObjectName(catalogName, prefix.getSchemaName().get(), table)))
                .filter(objectName -> {
                    if (!isColumnsEnumeratingTable(informationSchemaTable) ||
                            metadata.isMaterializedView(session, objectName) ||
                            metadata.isView(session, objectName)) {
                        return true;
                    }
                    // This is a columns-enumerating table and the object is not a view;
                    // filter out objects whose source table does not exist or whose redirection fails.
                    try {
                        return metadata.getRedirectionAwareTableHandle(session, objectName).getTableHandle().isPresent();
                    }
                    catch (TrinoException e) {
                        if (e.getErrorCode().equals(TABLE_REDIRECTION_ERROR.toErrorCode())) {
                            // Ignore redirection errors for listing; treat as if the table does not exist
                            return false;
                        }
                        throw e;
                    }
                })
                .filter(objectName -> predicate.isEmpty() || predicate.get().test(asFixedValues(objectName)))
                .map(QualifiedObjectName::asQualifiedTablePrefix)
                .collect(toImmutableSet());
    }
    if (predicate.isEmpty() || !isColumnsEnumeratingTable(informationSchemaTable)) {
        return prefixes;
    }
    return prefixes.stream()
            .flatMap(prefix -> Stream.concat(
                    metadata.listTables(session, prefix).stream(),
                    metadata.listViews(session, prefix).stream()))
            .filter(objectName -> predicate.get().test(asFixedValues(objectName)))
            .map(QualifiedObjectName::asQualifiedTablePrefix)
            .collect(toImmutableSet());
}
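The filterString helper is not shown on this page. As a rough sketch of what it must do, the following hypothetical implementation (the name filterString matches the call above, but the body is our assumption, not Trino's actual code) recovers the discrete VARCHAR values constrained for a single column, which is what lets the tables.isPresent() branch enumerate candidate table names instead of listing everything:

import io.airlift.slice.Slice;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import java.util.Optional;
import java.util.Set;

import static com.google.common.collect.ImmutableSet.toImmutableSet;

class FilterStringSketch
{
    // Hypothetical stand-in for filterString: recover the discrete VARCHAR values
    // constrained for one column, if the constraint pins it to an enumerable set.
    static Optional<Set<String>> filterString(TupleDomain<ColumnHandle> constraint, ColumnHandle column)
    {
        if (constraint.isNone()) {
            // Unsatisfiable constraint: no value can match
            return Optional.of(Set.of());
        }
        Domain domain = constraint.getDomains().orElseThrow().get(column);
        if (domain == null || !domain.getValues().isDiscreteSet()) {
            // Column is unconstrained, or constrained by ranges rather than points
            return Optional.empty();
        }
        return Optional.of(domain.getValues().getDiscreteSet().stream()
                .map(value -> ((Slice) value).toStringUtf8())
                .collect(toImmutableSet()));
    }
}

If the column is constrained by ranges rather than an enumerable set of points, such a helper returns Optional.empty() and the caller falls back to full listing.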
Use of io.trino.spi.predicate.TupleDomain in project trino by trinodb.
The class PruneTableScanColumns, method pruneColumns.
public static Optional<PlanNode> pruneColumns(Metadata metadata, TypeProvider types, Session session, TableScanNode node, Set<Symbol> referencedOutputs)
{
    List<Symbol> newOutputs = filteredCopy(node.getOutputSymbols(), referencedOutputs::contains);
    if (newOutputs.size() == node.getOutputSymbols().size()) {
        return Optional.empty();
    }
    List<ConnectorExpression> projections = newOutputs.stream()
            .map(symbol -> new Variable(symbol.getName(), types.get(symbol)))
            .collect(toImmutableList());
    TableHandle handle = node.getTable();
    Optional<ProjectionApplicationResult<TableHandle>> result = metadata.applyProjection(
            session,
            handle,
            projections,
            newOutputs.stream().collect(toImmutableMap(Symbol::getName, node.getAssignments()::get)));
    Map<Symbol, ColumnHandle> newAssignments;
    // Bail out if the connector does anything other than limit the list of columns (e.g., if it synthesizes arbitrary expressions)
    if (result.isPresent() && result.get().getProjections().stream().allMatch(Variable.class::isInstance)) {
        handle = result.get().getHandle();
        Map<String, ColumnHandle> assignments = result.get().getAssignments().stream()
                .collect(toImmutableMap(Assignment::getVariable, Assignment::getColumn));
        ImmutableMap.Builder<Symbol, ColumnHandle> builder = ImmutableMap.builder();
        for (int i = 0; i < newOutputs.size(); i++) {
            Variable variable = (Variable) result.get().getProjections().get(i);
            builder.put(newOutputs.get(i), assignments.get(variable.getName()));
        }
        newAssignments = builder.buildOrThrow();
    }
    else {
        newAssignments = newOutputs.stream()
                .collect(toImmutableMap(Function.identity(), node.getAssignments()::get));
    }
    Set<ColumnHandle> visibleColumns = ImmutableSet.copyOf(newAssignments.values());
    TupleDomain<ColumnHandle> enforcedConstraint = node.getEnforcedConstraint()
            .filter((columnHandle, domain) -> visibleColumns.contains(columnHandle));
    Optional<PlanNodeStatsEstimate> newStatistics = node.getStatistics().map(statistics -> new PlanNodeStatsEstimate(
            statistics.getOutputRowCount(),
            statistics.getSymbolStatistics().entrySet().stream()
                    .filter(entry -> newAssignments.containsKey(entry.getKey()))
                    .collect(toImmutableMap(Entry::getKey, Entry::getValue))));
    return Optional.of(new TableScanNode(
            node.getId(),
            handle,
            newOutputs,
            newAssignments,
            enforcedConstraint,
            newStatistics,
            node.isUpdateTarget(),
            node.getUseConnectorNodePartitioning()));
}
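The enforcedConstraint pruning above relies on TupleDomain.filter, which keeps only the column domains accepted by a predicate. A minimal self-contained sketch (the String keys and column names are invented for illustration; the production code keys by ColumnHandle as above):

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import java.util.Map;
import java.util.Set;

import static io.trino.spi.type.BigintType.BIGINT;

class FilterExample
{
    public static void main(String[] args)
    {
        // Build a two-column constraint keyed by (illustrative) column names.
        TupleDomain<String> constraint = TupleDomain.withColumnDomains(Map.of(
                "order_id", Domain.singleValue(BIGINT, 42L),
                "ship_priority", Domain.notNull(BIGINT)));

        // Keep only the domains of columns that remain visible after pruning.
        Set<String> visible = Set.of("order_id");
        TupleDomain<String> pruned = constraint.filter((column, domain) -> visible.contains(column));

        // pruned still enforces order_id = 42, but no longer mentions ship_priority.
        System.out.println(pruned.getDomains().orElseThrow().keySet());
    }
}

Dropping a domain only widens the constraint, so the pruned TupleDomain remains a sound description of what the scan enforces.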
Use of io.trino.spi.predicate.TupleDomain in project trino by trinodb.
The class DeltaLakeSplitManager, method getSplits.
private Stream<DeltaLakeSplit> getSplits(
        ConnectorTransactionHandle transaction,
        DeltaLakeTableHandle tableHandle,
        ConnectorSession session,
        Optional<DataSize> maxScannedFileSize,
        Set<ColumnHandle> columnsCoveredByDynamicFilter,
        Constraint constraint)
{
    DeltaLakeMetastore metastore = getMetastore(session, transaction);
    String tableLocation = metastore.getTableLocation(tableHandle.getSchemaTableName(), session);
    List<AddFileEntry> validDataFiles = metastore.getValidDataFiles(tableHandle.getSchemaTableName(), session);
    TupleDomain<DeltaLakeColumnHandle> enforcedPartitionConstraint = tableHandle.getEnforcedPartitionConstraint();
    TupleDomain<DeltaLakeColumnHandle> nonPartitionConstraint = tableHandle.getNonPartitionConstraint();
    // Delta Lake handles updates and deletes by copying entire data files, minus the updated/deleted rows.
    // Because of this we can only have one Split/UpdatablePageSource per file.
    boolean splittable = tableHandle.getWriteType().isEmpty();
    AtomicInteger remainingInitialSplits = new AtomicInteger(maxInitialSplits);
    Optional<Instant> filesModifiedAfter = tableHandle.getAnalyzeHandle().flatMap(AnalyzeHandle::getFilesModifiedAfter);
    Optional<Long> maxScannedFileSizeInBytes = maxScannedFileSize.map(DataSize::toBytes);
    Set<String> predicatedColumnNames = Stream.concat(
            nonPartitionConstraint.getDomains().orElseThrow().keySet().stream(),
            columnsCoveredByDynamicFilter.stream().map(DeltaLakeColumnHandle.class::cast))
            // TODO is DeltaLakeColumnHandle.name normalized?
            .map(column -> column.getName().toLowerCase(ENGLISH))
            .collect(toImmutableSet());
    List<ColumnMetadata> schema = extractSchema(tableHandle.getMetadataEntry(), typeManager);
    List<ColumnMetadata> predicatedColumns = schema.stream()
            // ColumnMetadata.name is lowercase
            .filter(column -> predicatedColumnNames.contains(column.getName()))
            .collect(toImmutableList());
    return validDataFiles.stream().flatMap(addAction -> {
        if (tableHandle.getAnalyzeHandle().isPresent() && !tableHandle.getAnalyzeHandle().get().isInitialAnalyze() && !addAction.isDataChange()) {
            // Skip files which do not introduce a data change on non-initial ANALYZE
            return Stream.empty();
        }
        if (filesModifiedAfter.isPresent() && addAction.getModificationTime() <= filesModifiedAfter.get().toEpochMilli()) {
            return Stream.empty();
        }
        if (maxScannedFileSizeInBytes.isPresent() && addAction.getSize() > maxScannedFileSizeInBytes.get()) {
            return Stream.empty();
        }
        Map<DeltaLakeColumnHandle, Domain> enforcedDomains = enforcedPartitionConstraint.getDomains().orElseThrow();
        if (!partitionMatchesPredicate(addAction.getCanonicalPartitionValues(), enforcedDomains)) {
            return Stream.empty();
        }
        TupleDomain<DeltaLakeColumnHandle> statisticsPredicate = createStatisticsPredicate(
                addAction,
                predicatedColumns,
                tableHandle.getMetadataEntry().getCanonicalPartitionColumns());
        if (!nonPartitionConstraint.overlaps(statisticsPredicate)) {
            return Stream.empty();
        }
        if (constraint.predicate().isPresent()) {
            Map<String, Optional<String>> partitionValues = addAction.getCanonicalPartitionValues();
            Map<ColumnHandle, NullableValue> deserializedValues = constraint.getPredicateColumns().orElseThrow().stream()
                    .filter(column -> column instanceof DeltaLakeColumnHandle)
                    .filter(column -> partitionValues.containsKey(((DeltaLakeColumnHandle) column).getName()))
                    .collect(toImmutableMap(identity(), column -> {
                        DeltaLakeColumnHandle deltaLakeColumn = (DeltaLakeColumnHandle) column;
                        return NullableValue.of(
                                deltaLakeColumn.getType(),
                                deserializePartitionValue(deltaLakeColumn, addAction.getCanonicalPartitionValues().get(deltaLakeColumn.getName())));
                    }));
            if (!constraint.predicate().get().test(deserializedValues)) {
                return Stream.empty();
            }
        }
        return splitsForFile(session, addAction, tableLocation, addAction.getCanonicalPartitionValues(), statisticsPredicate, splittable, remainingInitialSplits).stream();
    });
}
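The file-skipping step hinges on TupleDomain.overlaps: when the query's non-partition constraint and the per-file statistics predicate are provably disjoint, no row in that file can match, so the file is pruned. A contrived sketch (column name and value ranges invented):

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.predicate.ValueSet;

import java.util.Map;

import static io.trino.spi.type.BigintType.BIGINT;

class OverlapsExample
{
    public static void main(String[] args)
    {
        // Query constraint: price > 100
        TupleDomain<String> query = TupleDomain.withColumnDomains(Map.of(
                "price", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 100L)), false)));

        // Per-file statistics: min = 1, max = 50, i.e. price in [1, 50]
        TupleDomain<String> fileStats = TupleDomain.withColumnDomains(Map.of(
                "price", Domain.create(ValueSet.ofRanges(Range.range(BIGINT, 1L, true, 50L, true)), false)));

        // The ranges are disjoint: the file cannot contain a matching row, so skip it.
        System.out.println(query.overlaps(fileStats)); // false
    }
}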
Use of io.trino.spi.predicate.TupleDomain in project trino by trinodb.
The class ParquetPageSourceFactory, method getParquetTupleDomain.
public static TupleDomain<ColumnDescriptor> getParquetTupleDomain(
        Map<List<String>, RichColumnDescriptor> descriptorsByPath,
        TupleDomain<HiveColumnHandle> effectivePredicate,
        MessageType fileSchema,
        boolean useColumnNames)
{
    if (effectivePredicate.isNone()) {
        return TupleDomain.none();
    }
    ImmutableMap.Builder<ColumnDescriptor, Domain> predicate = ImmutableMap.builder();
    for (Entry<HiveColumnHandle, Domain> entry : effectivePredicate.getDomains().get().entrySet()) {
        HiveColumnHandle columnHandle = entry.getKey();
        // Skip looking up predicates for complex types, as Parquet only stores stats for primitives
        if (columnHandle.getHiveType().getCategory() != PRIMITIVE || columnHandle.getColumnType() != REGULAR) {
            continue;
        }
        RichColumnDescriptor descriptor;
        if (useColumnNames) {
            descriptor = descriptorsByPath.get(ImmutableList.of(columnHandle.getName()));
        }
        else {
            org.apache.parquet.schema.Type parquetField = getParquetType(columnHandle, fileSchema, false);
            if (parquetField == null || !parquetField.isPrimitive()) {
                // The field is missing from the file schema or is a complex type
                continue;
            }
            descriptor = descriptorsByPath.get(ImmutableList.of(parquetField.getName()));
        }
        if (descriptor != null) {
            predicate.put(descriptor, entry.getValue());
        }
    }
    return TupleDomain.withColumnDomains(predicate.buildOrThrow());
}
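Conceptually the method is a key translation: domains keyed by HiveColumnHandle are rebuilt keyed by ColumnDescriptor, dropping entries that have no primitive Parquet counterpart. The same rebuild pattern in miniature, assuming made-up String-to-Integer keys:

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import java.util.Map;

import static io.trino.spi.type.BigintType.BIGINT;

class KeyTranslationExample
{
    // Rebuild a TupleDomain under a new key type, dropping unmappable entries.
    static TupleDomain<Integer> translate(TupleDomain<String> source, Map<String, Integer> keyMapping)
    {
        if (source.isNone()) {
            return TupleDomain.none();
        }
        ImmutableMap.Builder<Integer, Domain> builder = ImmutableMap.builder();
        for (Map.Entry<String, Domain> entry : source.getDomains().orElseThrow().entrySet()) {
            Integer newKey = keyMapping.get(entry.getKey());
            if (newKey != null) {
                builder.put(newKey, entry.getValue());
            }
        }
        return TupleDomain.withColumnDomains(builder.buildOrThrow());
    }

    public static void main(String[] args)
    {
        TupleDomain<String> source = TupleDomain.withColumnDomains(Map.of(
                "a", Domain.singleValue(BIGINT, 1L)));
        System.out.println(translate(source, Map.of("a", 0)).getDomains().orElseThrow());
    }
}

Note that dropping an entry weakens the predicate, which is safe here: the Parquet reader re-applies the full filter, so the translated domain only needs to be an over-approximation.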
Use of io.trino.spi.predicate.TupleDomain in project trino by trinodb.
The class ParquetPageSourceFactory, method getColumnIndexStore.
private static Optional<ColumnIndexStore> getColumnIndexStore(
        ParquetDataSource dataSource,
        BlockMetaData blockMetadata,
        Map<List<String>, RichColumnDescriptor> descriptorsByPath,
        TupleDomain<ColumnDescriptor> parquetTupleDomain,
        ParquetReaderOptions options)
{
    if (!options.isUseColumnIndex() || parquetTupleDomain.isAll() || parquetTupleDomain.isNone()) {
        return Optional.empty();
    }
    boolean hasColumnIndex = false;
    for (ColumnChunkMetaData column : blockMetadata.getColumns()) {
        if (column.getColumnIndexReference() != null && column.getOffsetIndexReference() != null) {
            hasColumnIndex = true;
            break;
        }
    }
    if (!hasColumnIndex) {
        return Optional.empty();
    }
    Set<ColumnPath> columnsReadPaths = new HashSet<>(descriptorsByPath.size());
    for (List<String> path : descriptorsByPath.keySet()) {
        columnsReadPaths.add(ColumnPath.get(path.toArray(new String[0])));
    }
    Map<ColumnDescriptor, Domain> parquetDomains = parquetTupleDomain.getDomains()
            .orElseThrow(() -> new IllegalStateException("Predicate other than none should have domains"));
    Set<ColumnPath> columnsFilteredPaths = parquetDomains.keySet().stream()
            .map(column -> ColumnPath.get(column.getPath()))
            .collect(toImmutableSet());
    return Optional.of(new TrinoColumnIndexStore(dataSource, blockMetadata, columnsReadPaths, columnsFilteredPaths));
}
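The early return exploits the two degenerate TupleDomain states: isAll() means no filtering is requested, so a column index buys nothing, while isNone() means the predicate is unsatisfiable and no pages will be read anyway. A quick illustration:

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import java.util.Map;

import static io.trino.spi.type.BigintType.BIGINT;

class DegenerateStatesExample
{
    public static void main(String[] args)
    {
        TupleDomain<String> all = TupleDomain.all();   // every row matches
        TupleDomain<String> none = TupleDomain.none(); // no row can match

        System.out.println(all.isAll());                   // true
        System.out.println(none.isNone());                 // true
        System.out.println(none.getDomains().isPresent()); // false: none() has no domain map

        // A real constraint is neither all nor none.
        TupleDomain<String> some = TupleDomain.withColumnDomains(Map.of(
                "x", Domain.singleValue(BIGINT, 7L)));
        System.out.println(some.isAll() || some.isNone()); // false
    }
}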