Use of io.trino.spi.predicate.NullableValue in project trino by trinodb: class InformationSchemaMetadata, method calculatePrefixesWithSchemaName.
private Set<QualifiedTablePrefix> calculatePrefixesWithSchemaName(
        ConnectorSession connectorSession,
        TupleDomain<ColumnHandle> constraint,
        Optional<Predicate<Map<ColumnHandle, NullableValue>>> predicate)
{
    Optional<Set<String>> schemas = filterString(constraint, SCHEMA_COLUMN_HANDLE);
    if (schemas.isPresent()) {
        return schemas.get().stream()
                .filter(this::isLowerCase)
                .filter(schema -> predicate.isEmpty() || predicate.get().test(schemaAsFixedValues(schema)))
                .map(schema -> new QualifiedTablePrefix(catalogName, schema))
                .collect(toImmutableSet());
    }
    if (predicate.isEmpty()) {
        return ImmutableSet.of(new QualifiedTablePrefix(catalogName));
    }
    Session session = ((FullConnectorSession) connectorSession).getSession();
    return listSchemaNames(session)
            .filter(prefix -> predicate.get().test(schemaAsFixedValues(prefix.getSchemaName().get())))
            .collect(toImmutableSet());
}
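The predicate receives a Map<ColumnHandle, NullableValue> of fixed bindings for the schema name column. The helper schemaAsFixedValues is not shown above; a minimal sketch of what it could look like, assuming the schema column is an unbounded VARCHAR (a hypothetical reconstruction, not the verbatim Trino source):

import static io.airlift.slice.Slices.utf8Slice;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

// Sketch: wraps a schema name as a single-entry fixed-value map so the
// pushed-down predicate can be evaluated against it.
private Map<ColumnHandle, NullableValue> schemaAsFixedValues(String schema)
{
    return ImmutableMap.of(
            SCHEMA_COLUMN_HANDLE,
            new NullableValue(createUnboundedVarcharType(), utf8Slice(schema)));
}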
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb: class InformationSchemaMetadata, method calculatePrefixesWithTableName.
private Set<QualifiedTablePrefix> calculatePrefixesWithTableName(
        InformationSchemaTable informationSchemaTable,
        ConnectorSession connectorSession,
        Set<QualifiedTablePrefix> prefixes,
        TupleDomain<ColumnHandle> constraint,
        Optional<Predicate<Map<ColumnHandle, NullableValue>>> predicate)
{
    Session session = ((FullConnectorSession) connectorSession).getSession();

    Optional<Set<String>> tables = filterString(constraint, TABLE_NAME_COLUMN_HANDLE);
    if (tables.isPresent()) {
        return prefixes.stream()
                .peek(prefix -> verify(prefix.asQualifiedObjectName().isEmpty()))
                .flatMap(prefix -> prefix.getSchemaName()
                        .map(schemaName -> Stream.of(prefix))
                        .orElseGet(() -> listSchemaNames(session)))
                .flatMap(prefix -> tables.get().stream()
                        .filter(this::isLowerCase)
                        .map(table -> new QualifiedObjectName(catalogName, prefix.getSchemaName().get(), table)))
                .filter(objectName -> {
                    if (!isColumnsEnumeratingTable(informationSchemaTable)
                            || metadata.isMaterializedView(session, objectName)
                            || metadata.isView(session, objectName)) {
                        return true;
                    }
                    // This is a columns-enumerating table and the object is not a view;
                    // filter in case the source table does not exist or there is a problem with redirection.
                    try {
                        return metadata.getRedirectionAwareTableHandle(session, objectName).getTableHandle().isPresent();
                    }
                    catch (TrinoException e) {
                        if (e.getErrorCode().equals(TABLE_REDIRECTION_ERROR.toErrorCode())) {
                            // Ignore redirection errors for listing; treat as if the table does not exist
                            return false;
                        }
                        throw e;
                    }
                })
                .filter(objectName -> predicate.isEmpty() || predicate.get().test(asFixedValues(objectName)))
                .map(QualifiedObjectName::asQualifiedTablePrefix)
                .collect(toImmutableSet());
    }

    if (predicate.isEmpty() || !isColumnsEnumeratingTable(informationSchemaTable)) {
        return prefixes;
    }

    return prefixes.stream()
            .flatMap(prefix -> Stream.concat(
                    metadata.listTables(session, prefix).stream(),
                    metadata.listViews(session, prefix).stream()))
            .filter(objectName -> predicate.get().test(asFixedValues(objectName)))
            .map(QualifiedObjectName::asQualifiedTablePrefix)
            .collect(toImmutableSet());
}
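Here asFixedValues plays the same role for fully qualified names. A hedged sketch under the same assumptions (all three information_schema columns are VARCHAR; CATALOG_COLUMN_HANDLE is assumed by analogy with the handles above; again a reconstruction, not verbatim source):

// Sketch: binds catalog, schema, and table name so a pushed-down predicate
// over any of the three columns can be tested for one candidate object.
private Map<ColumnHandle, NullableValue> asFixedValues(QualifiedObjectName objectName)
{
    return ImmutableMap.of(
            CATALOG_COLUMN_HANDLE, new NullableValue(createUnboundedVarcharType(), utf8Slice(objectName.getCatalogName())),
            SCHEMA_COLUMN_HANDLE, new NullableValue(createUnboundedVarcharType(), utf8Slice(objectName.getSchemaName())),
            TABLE_NAME_COLUMN_HANDLE, new NullableValue(createUnboundedVarcharType(), utf8Slice(objectName.getObjectName())));
}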
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb: class DeltaLakeSplitManager, method getSplits.
private Stream<DeltaLakeSplit> getSplits(
        ConnectorTransactionHandle transaction,
        DeltaLakeTableHandle tableHandle,
        ConnectorSession session,
        Optional<DataSize> maxScannedFileSize,
        Set<ColumnHandle> columnsCoveredByDynamicFilter,
        Constraint constraint)
{
    DeltaLakeMetastore metastore = getMetastore(session, transaction);
    String tableLocation = metastore.getTableLocation(tableHandle.getSchemaTableName(), session);
    List<AddFileEntry> validDataFiles = metastore.getValidDataFiles(tableHandle.getSchemaTableName(), session);
    TupleDomain<DeltaLakeColumnHandle> enforcedPartitionConstraint = tableHandle.getEnforcedPartitionConstraint();
    TupleDomain<DeltaLakeColumnHandle> nonPartitionConstraint = tableHandle.getNonPartitionConstraint();

    // Delta Lake handles updates and deletes by copying entire data files, minus the
    // updated/deleted rows. Because of this we can only have one Split/UpdatablePageSource per file.
    boolean splittable = tableHandle.getWriteType().isEmpty();
    AtomicInteger remainingInitialSplits = new AtomicInteger(maxInitialSplits);
    Optional<Instant> filesModifiedAfter = tableHandle.getAnalyzeHandle().flatMap(AnalyzeHandle::getFilesModifiedAfter);
    Optional<Long> maxScannedFileSizeInBytes = maxScannedFileSize.map(DataSize::toBytes);

    Set<String> predicatedColumnNames = Stream.concat(
            nonPartitionConstraint.getDomains().orElseThrow().keySet().stream(),
            columnsCoveredByDynamicFilter.stream().map(DeltaLakeColumnHandle.class::cast))
            // TODO is DeltaLakeColumnHandle.name normalized?
            .map(column -> column.getName().toLowerCase(ENGLISH))
            .collect(toImmutableSet());
    List<ColumnMetadata> schema = extractSchema(tableHandle.getMetadataEntry(), typeManager);
    List<ColumnMetadata> predicatedColumns = schema.stream()
            // ColumnMetadata.name is lowercase
            .filter(column -> predicatedColumnNames.contains(column.getName()))
            .collect(toImmutableList());

    return validDataFiles.stream()
            .flatMap(addAction -> {
                if (tableHandle.getAnalyzeHandle().isPresent()
                        && !tableHandle.getAnalyzeHandle().get().isInitialAnalyze()
                        && !addAction.isDataChange()) {
                    // Skip files which do not introduce a data change on a non-initial ANALYZE
                    return Stream.empty();
                }
                if (filesModifiedAfter.isPresent() && addAction.getModificationTime() <= filesModifiedAfter.get().toEpochMilli()) {
                    return Stream.empty();
                }
                if (maxScannedFileSizeInBytes.isPresent() && addAction.getSize() > maxScannedFileSizeInBytes.get()) {
                    return Stream.empty();
                }

                Map<DeltaLakeColumnHandle, Domain> enforcedDomains = enforcedPartitionConstraint.getDomains().orElseThrow();
                if (!partitionMatchesPredicate(addAction.getCanonicalPartitionValues(), enforcedDomains)) {
                    return Stream.empty();
                }

                TupleDomain<DeltaLakeColumnHandle> statisticsPredicate = createStatisticsPredicate(
                        addAction,
                        predicatedColumns,
                        tableHandle.getMetadataEntry().getCanonicalPartitionColumns());
                if (!nonPartitionConstraint.overlaps(statisticsPredicate)) {
                    return Stream.empty();
                }

                if (constraint.predicate().isPresent()) {
                    Map<String, Optional<String>> partitionValues = addAction.getCanonicalPartitionValues();
                    Map<ColumnHandle, NullableValue> deserializedValues = constraint.getPredicateColumns().orElseThrow().stream()
                            .filter(column -> column instanceof DeltaLakeColumnHandle)
                            .filter(column -> partitionValues.containsKey(((DeltaLakeColumnHandle) column).getName()))
                            .collect(toImmutableMap(identity(), column -> {
                                DeltaLakeColumnHandle deltaLakeColumn = (DeltaLakeColumnHandle) column;
                                return NullableValue.of(
                                        deltaLakeColumn.getType(),
                                        deserializePartitionValue(deltaLakeColumn, partitionValues.get(deltaLakeColumn.getName())));
                            }));
                    if (!constraint.predicate().get().test(deserializedValues)) {
                        return Stream.empty();
                    }
                }

                return splitsForFile(
                        session,
                        addAction,
                        tableLocation,
                        addAction.getCanonicalPartitionValues(),
                        statisticsPredicate,
                        splittable,
                        remainingInitialSplits)
                        .stream();
            });
}
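On the caller side, constraint.predicate() is an engine-provided Predicate over a Map<ColumnHandle, NullableValue>. A hedged sketch of what such a predicate could look like for a VARCHAR partition column (dsColumnHandle is a hypothetical handle, and the date literal is illustration only):

import io.airlift.slice.Slice;
import java.util.Map;
import java.util.function.Predicate;

// Hypothetical predicate: keep only files whose "ds" partition equals '2023-01-01'.
Predicate<Map<ColumnHandle, NullableValue>> dsPredicate = bindings -> {
    NullableValue value = bindings.get(dsColumnHandle); // hypothetical handle for the "ds" column
    if (value == null || value.isNull()) {
        return false; // a NULL partition value cannot match the equality
    }
    // VARCHAR's native container value is an io.airlift.slice.Slice
    return ((Slice) value.getValue()).toStringUtf8().equals("2023-01-01");
};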
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb: class TestHiveBucketing, method assertBucketsEqual.
private static void assertBucketsEqual(List<String> hiveTypeStrings, List<List<Object>> hiveValues, int bucketCount,
        Optional<Set<Integer>> expectedBucketsV1, Optional<Set<Integer>> expectedBucketsV2)
{
    List<HiveType> hiveTypes = hiveTypeStrings.stream().map(HiveType::valueOf).collect(toImmutableList());
    List<TypeInfo> hiveTypeInfos = hiveTypes.stream().map(HiveType::getTypeInfo).collect(toImmutableList());
    List<Type> trinoTypes = hiveTypes.stream().map(type -> type.getType(TESTING_TYPE_MANAGER)).collect(toImmutableList());

    ImmutableList.Builder<List<NullableValue>> values = ImmutableList.builder();
    for (int i = 0; i < hiveValues.size(); i++) {
        List<Object> valueList = hiveValues.get(i);
        Type trinoType = trinoTypes.get(i);
        values.add(valueList.stream()
                .map(value -> new NullableValue(trinoType, toNativeContainerValue(trinoType, value)))
                .collect(toImmutableList()));
    }

    assertEquals(getHiveBuckets(BUCKETING_V1, bucketCount, hiveTypeInfos, values.build()), expectedBucketsV1);
    assertEquals(getHiveBuckets(BUCKETING_V2, bucketCount, hiveTypeInfos, values.build()), expectedBucketsV2);
}
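For reference, NullableValue pairs a Trino Type with that type's native container value, and a null container value models SQL NULL; the constructor used above and the static factories are interchangeable for that purpose:

import static io.trino.spi.type.BigintType.BIGINT;

// new NullableValue(type, null) and NullableValue.asNull(type) both model SQL NULL
NullableValue explicitNull = new NullableValue(BIGINT, null);
NullableValue factoryNull = NullableValue.asNull(BIGINT);

// Non-null values must use the type's native representation
// (long for BIGINT, Slice for VARCHAR, and so on)
NullableValue fortyTwo = NullableValue.of(BIGINT, 42L);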
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb: class CassandraType, method buildUserTypeValue.
private Block buildUserTypeValue(GettableByIndexData row, int position)
{
    verify(this.kind == Kind.UDT, "Not a user defined type: %s", this.kind);
    UDTValue udtValue = row.getUDTValue(position);
    String[] fieldNames = udtValue.getType().getFieldNames().toArray(String[]::new);

    // Build a single-row Trino Block whose fields mirror the Cassandra UDT fields
    RowBlockBuilder blockBuilder = (RowBlockBuilder) this.trinoType.createBlockBuilder(null, 1);
    SingleRowBlockWriter singleRowBlockWriter = blockBuilder.beginBlockEntry();
    int tuplePosition = 0;
    for (CassandraType argumentType : this.getArgumentTypes()) {
        int finalTuplePosition = tuplePosition;
        // getColumnValue wraps each field in a NullableValue; writeNativeValue
        // accepts both null and non-null native values
        NullableValue value = argumentType.getColumnValue(
                udtValue,
                tuplePosition,
                () -> udtValue.getType().getFieldType(fieldNames[finalTuplePosition]));
        writeNativeValue(argumentType.getTrinoType(), singleRowBlockWriter, value.getValue());
        tuplePosition++;
    }
    blockBuilder.closeEntry();
    return (Block) this.trinoType.getObject(blockBuilder, 0);
}
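The same NullableValue bindings feed other SPI entry points as well; for example, TupleDomain.fromFixedValues turns a fixed-value map into a single-point tuple domain. A brief sketch (someColumnHandle is assumed from the surrounding context):

import static io.trino.spi.type.BigintType.BIGINT;

// A single-point TupleDomain built from NullableValue bindings; this is a
// common way fixed values such as partition keys are expressed in the SPI.
Map<ColumnHandle, NullableValue> bindings = ImmutableMap.of(
        someColumnHandle, // assumed to exist in the surrounding context
        NullableValue.of(BIGINT, 1L));
TupleDomain<ColumnHandle> domain = TupleDomain.fromFixedValues(bindings);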