use of com.facebook.presto.spi.Constraint in project presto by prestodb.
the class AbstractTestHiveClientS3 method testGetRecordsS3.
@Test
public void testGetRecordsS3()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle table = getTableHandle(metadata, tableS3);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, table).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        // resolve the single layout for an unconstrained scan of the S3-backed table
        List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, table, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
        HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
        assertEquals(layoutHandle.getPartitions().get().size(), 1);
        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle);

        long sum = 0;
        for (ConnectorSplit split : getAllSplits(splitSource)) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, columnHandles)) {
                MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));

                for (MaterializedRow row : result) {
                    sum += (Long) row.getField(columnIndex.get("t_bigint"));
                }
            }
        }
        assertEquals(sum, 78300);
    }
}
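The test exercises the full read path: table handle, layout, splits, then page sources. getAllSplits is a test helper rather than SPI, so its body is not part of this snippet; below is a minimal sketch of what such a helper could look like, assuming the SPI of this era where ConnectorSplitSource.getNextBatch(int) returns a CompletableFuture of a split batch. This is a plausible reconstruction, not the helper from AbstractTestHiveClient itself.

private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        // block on each batch; 1000 is an arbitrary batch size for a test
        splits.addAll(splitSource.getNextBatch(1000).join());
    }
    return splits.build();
}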
use of com.facebook.presto.spi.Constraint in project presto by prestodb.
the class HivePartitionManager method getPartitions.
public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, ConnectorTableHandle tableHandle, Constraint<ColumnHandle> constraint)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    TupleDomain<ColumnHandle> effectivePredicate = constraint.getSummary();

    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = getTable(metastore, tableName);
    Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(connectorId, table);

    List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(connectorId, table);
    List<HiveBucket> buckets = getHiveBucketNumbers(table, effectivePredicate);
    TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate, domainCompactionThreshold);

    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(), TupleDomain.none(), TupleDomain.none(), hiveBucketHandle);
    }

    if (partitionColumns.isEmpty()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(new HivePartition(tableName, compactEffectivePredicate, buckets)), effectivePredicate, TupleDomain.none(), hiveBucketHandle);
    }

    List<Type> partitionTypes = partitionColumns.stream()
            .map(column -> typeManager.getType(column.getTypeSignature()))
            .collect(toList());

    List<String> partitionNames = getFilteredPartitionNames(metastore, tableName, partitionColumns, effectivePredicate);

    // do a final pass to filter based on fields that could not be used to filter the partitions
    int partitionCount = 0;
    ImmutableList.Builder<HivePartition> partitions = ImmutableList.builder();
    for (String partitionName : partitionNames) {
        Optional<Map<ColumnHandle, NullableValue>> values = parseValuesAndFilterPartition(partitionName, partitionColumns, partitionTypes, constraint);

        if (values.isPresent()) {
            if (partitionCount == maxPartitions) {
                throw new PrestoException(HIVE_EXCEEDED_PARTITION_LIMIT, format("Query over table '%s' can potentially read more than %s partitions", hiveTableHandle.getSchemaTableName(), maxPartitions));
            }
            partitionCount++;
            partitions.add(new HivePartition(tableName, compactEffectivePredicate, partitionName, values.get(), buckets));
        }
    }

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.withColumnDomains(Maps.filterKeys(effectivePredicate.getDomains().get(), not(Predicates.in(partitionColumns))));
    TupleDomain<ColumnHandle> enforcedTupleDomain = TupleDomain.withColumnDomains(Maps.filterKeys(effectivePredicate.getDomains().get(), Predicates.in(partitionColumns)));
    return new HivePartitionResult(partitionColumns, partitions.build(), remainingTupleDomain, enforcedTupleDomain, hiveBucketHandle);
}
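The last two statements split the effective predicate in two: the part fully enforced by partition pruning, and the remainder the engine must still evaluate against the data itself. Below is a self-contained toy illustration of that Maps.filterKeys/Predicates split, using plain strings in place of real ColumnHandle and Domain objects; the class name and values are invented for illustration only.

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.Map;
import java.util.Set;

public class DomainSplitToy
{
    public static void main(String[] args)
    {
        // stand-ins: strings instead of ColumnHandle, textual ranges instead of Domain
        Set<String> partitionColumns = ImmutableSet.of("ds");
        Map<String, String> domains = ImmutableMap.of(
                "ds", "[2017-01-01]",
                "t_bigint", "(0..100)");

        // same split as getPartitions above: enforced = domains on partition keys,
        // remaining = everything the engine must still check row by row
        Map<String, String> enforced = Maps.filterKeys(domains, Predicates.in(partitionColumns));
        Map<String, String> remaining = Maps.filterKeys(domains, Predicates.not(Predicates.in(partitionColumns)));

        System.out.println("enforced  = " + enforced);
        System.out.println("remaining = " + remaining);
    }
}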
use of com.facebook.presto.spi.Constraint in project presto by prestodb.
the class HivePartitionManager method parseValuesAndFilterPartition.
private Optional<Map<ColumnHandle, NullableValue>> parseValuesAndFilterPartition(String partitionName, List<HiveColumnHandle> partitionColumns, List<Type> partitionTypes, Constraint<ColumnHandle> constraint)
{
    List<String> partitionValues = extractPartitionKeyValues(partitionName);

    Map<ColumnHandle, Domain> domains = constraint.getSummary().getDomains().get();
    ImmutableMap.Builder<ColumnHandle, NullableValue> builder = ImmutableMap.builder();
    for (int i = 0; i < partitionColumns.size(); i++) {
        HiveColumnHandle column = partitionColumns.get(i);
        NullableValue parsedValue = parsePartitionValue(partitionName, partitionValues.get(i), partitionTypes.get(i), timeZone);

        Domain allowedDomain = domains.get(column);
        if (allowedDomain != null && !allowedDomain.includesNullableValue(parsedValue.getValue())) {
            return Optional.empty();
        }
        builder.put(column, parsedValue);
    }
    Map<ColumnHandle, NullableValue> values = builder.build();

    if (!constraint.predicate().test(values)) {
        return Optional.empty();
    }

    return Optional.of(values);
}
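Note the two-stage filter: the TupleDomain summary prunes each column independently, and then constraint.predicate() receives the fully parsed partition values for anything the summary could not express. A hedged sketch of building such a constraint follows; dsColumn stands in for a real partition ColumnHandle and is assumed here, not taken from the snippets above.

// illustrative only: reject null partition keys via the row-level predicate;
// `dsColumn` is a hypothetical ColumnHandle for a partition column
Constraint<ColumnHandle> constraint = new Constraint<>(
        TupleDomain.all(),
        bindings -> {
            NullableValue ds = bindings.get(dsColumn);
            return ds != null && !ds.isNull();
        });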
use of com.facebook.presto.spi.Constraint in project presto by prestodb.
the class MetadataManager method getLayouts.
@Override
public List<TableLayoutResult> getLayouts(Session session, TableHandle table, Constraint<ColumnHandle> constraint, Optional<Set<ColumnHandle>> desiredColumns)
{
    if (constraint.getSummary().isNone()) {
        return ImmutableList.of();
    }

    ConnectorId connectorId = table.getConnectorId();
    ConnectorTableHandle connectorTable = table.getConnectorHandle();

    CatalogMetadata catalogMetadata = getCatalogMetadata(session, connectorId);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(connectorId);
    ConnectorTransactionHandle transaction = catalogMetadata.getTransactionHandleFor(connectorId);
    ConnectorSession connectorSession = session.toConnectorSession(connectorId);

    List<ConnectorTableLayoutResult> layouts = metadata.getTableLayouts(connectorSession, connectorTable, constraint, desiredColumns);
    return layouts.stream()
            .map(layout -> new TableLayoutResult(fromConnectorLayout(connectorId, transaction, layout.getTableLayout()), layout.getUnenforcedConstraint()))
            .collect(toImmutableList());
}
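A hedged sketch of a call site, assuming session and tableHandle are already in hand; the Constraint mirrors the unconstrained form used in the other snippets on this page, and the selection logic is invented for illustration.

// illustrative caller: request all layouts with no pushed-down predicate
List<TableLayoutResult> layouts = metadata.getLayouts(
        session,
        tableHandle,
        new Constraint<>(TupleDomain.all(), bindings -> true),
        Optional.empty());
if (!layouts.isEmpty()) {
    // a real planner would compare layouts; here we just take the first
    TableLayoutResult chosen = layouts.get(0);
    System.out.println("picked layout: " + chosen);
}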
use of com.facebook.presto.spi.Constraint in project presto by prestodb.
the class ConnectorMetadata method getInsertLayout.
/**
 * Get the physical layout for inserting into an existing table.
 */
default Optional<ConnectorNewTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    List<ConnectorTableLayout> layouts = getTableLayouts(session, tableHandle, new Constraint<>(TupleDomain.all(), map -> true), Optional.empty()).stream()
            .map(ConnectorTableLayoutResult::getTableLayout)
            .filter(layout -> layout.getNodePartitioning().isPresent())
            .collect(toList());

    if (layouts.isEmpty()) {
        return Optional.empty();
    }

    if (layouts.size() > 1) {
        throw new PrestoException(NOT_SUPPORTED, "Tables with multiple layouts can not be written");
    }

    ConnectorTableLayout layout = layouts.get(0);
    ConnectorPartitioningHandle partitioningHandle = layout.getNodePartitioning().get().getPartitioningHandle();
    Map<ColumnHandle, String> columnNamesByHandle = getColumnHandles(session, tableHandle).entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
    List<String> partitionColumns = layout.getNodePartitioning().get().getPartitioningColumns().stream()
            .map(columnNamesByHandle::get)
            .collect(toList());

    return Optional.of(new ConnectorNewTableLayout(partitioningHandle, partitionColumns));
}
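A hedged sketch of consuming the result, assuming ConnectorNewTableLayout exposes its partitioning handle and column names via getPartitioning() and getPartitionColumns(); the caller wiring is illustrative, not engine code.

// illustrative only: check whether inserts must follow a connector-supplied layout
Optional<ConnectorNewTableLayout> insertLayout = metadata.getInsertLayout(session, tableHandle);
insertLayout.ifPresent(newLayout -> {
    // writers would be placed according to newLayout.getPartitioning()
    List<String> partitionColumns = newLayout.getPartitionColumns();
    System.out.println("insert layout partitions on: " + partitionColumns);
});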