Use of io.prestosql.spi.predicate.NullableValue in project hetu-core by openlookeng.
In class ActualProperties, method translateRowExpression:
public ActualProperties translateRowExpression(Map<Symbol, RowExpression> assignments, TypeProvider types)
{
    Map<Symbol, SymbolReference> inputToOutputSymbol = new HashMap<>();
    for (Map.Entry<Symbol, RowExpression> assignment : assignments.entrySet()) {
        RowExpression expression = assignment.getValue();
        SymbolReference symbolReference = SymbolUtils.toSymbolReference(assignment.getKey());
        if (isExpression(expression)) {
            if (castToExpression(expression) instanceof SymbolReference) {
                inputToOutputSymbol.put(SymbolUtils.from(castToExpression(expression)), symbolReference);
            }
        }
        else {
            if (expression instanceof VariableReferenceExpression) {
                inputToOutputSymbol.put(new Symbol(((VariableReferenceExpression) expression).getName()), symbolReference);
            }
        }
    }
    Map<Symbol, Partitioning.ArgumentBinding> inputToOutputMappings = inputToOutputSymbol.entrySet().stream()
            .collect(Collectors.toMap(v -> v.getKey(), v -> expressionBinding(v.getValue())));
    Map<Symbol, NullableValue> translatedConstants = new HashMap<>();
    for (Map.Entry<Symbol, NullableValue> entry : constants.entrySet()) {
        if (inputToOutputSymbol.containsKey(entry.getKey())) {
            // the constant's input symbol is projected through: re-key it to the output symbol
            Symbol symbol = SymbolUtils.from(inputToOutputSymbol.get(entry.getKey()));
            translatedConstants.put(symbol, entry.getValue());
        }
        else {
            // otherwise record it as a constant binding for the partitioning translation
            inputToOutputMappings.put(entry.getKey(), constantBinding(entry.getValue()));
        }
    }
    return builder()
            .global(global.translateRowExpression(inputToOutputMappings, assignments, types))
            .local(LocalProperties.translate(localProperties, symbol -> inputToOutputSymbol.containsKey(symbol)
                    ? Optional.of(SymbolUtils.from(inputToOutputSymbol.get(symbol)))
                    : Optional.empty()))
            .constants(translatedConstants)
            .build();
}
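The NullableValue bookkeeping above re-keys a constant to its output symbol when that symbol is projected through, and otherwise turns it into a constant partitioning binding. A minimal sketch of that re-keying, hedged as an illustration: the symbol names, types, and values are invented and plain maps stand in for the planner's state; only the NullableValue factory calls are real SPI.

// Invented example: constant "ds" is projected to output symbol "ds_out";
// constant "bucket" has no output symbol and would become constantBinding(...) above.
Map<Symbol, NullableValue> exampleConstants = new HashMap<>();
exampleConstants.put(new Symbol("ds"), NullableValue.of(VarcharType.VARCHAR, Slices.utf8Slice("2024-01-01")));
exampleConstants.put(new Symbol("bucket"), NullableValue.asNull(BigintType.BIGINT));
Map<Symbol, Symbol> exampleInputToOutput = ImmutableMap.of(new Symbol("ds"), new Symbol("ds_out"));
Map<Symbol, NullableValue> exampleTranslated = new HashMap<>();
for (Map.Entry<Symbol, NullableValue> entry : exampleConstants.entrySet()) {
    Symbol output = exampleInputToOutput.get(entry.getKey());
    if (output != null) {
        // the constant survives under the renamed symbol
        exampleTranslated.put(output, entry.getValue());
    }
}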
Use of io.prestosql.spi.predicate.NullableValue in project hetu-core by openlookeng.
In class LocalExecutionPlanner, method plan:
public LocalExecutionPlan plan(TaskContext taskContext, PlanNode plan, TypeProvider types, PartitioningScheme partitioningScheme, StageExecutionDescriptor stageExecutionDescriptor, List<PlanNodeId> partitionedSourceOrder, OutputBuffer outputBuffer, Optional<PlanFragmentId> feederCTEId, Optional<PlanNodeId> feederCTEParentId, Map<String, CommonTableExecutionContext> cteCtx)
{
    List<Symbol> outputLayout = partitioningScheme.getOutputLayout();
    if (partitioningScheme.getPartitioning().getHandle().equals(FIXED_BROADCAST_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(FIXED_ARBITRARY_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(SCALED_WRITER_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(SINGLE_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(COORDINATOR_DISTRIBUTION)) {
        return plan(taskContext, stageExecutionDescriptor, plan, outputLayout, types, partitionedSourceOrder, outputBuffer, new TaskOutputFactory(outputBuffer), feederCTEId, feederCTEParentId, cteCtx);
    }
    // We can convert the symbols directly into channels, because the root must be a sink and therefore the layout is fixed
    List<Integer> partitionChannels;
    List<Optional<NullableValue>> partitionConstants;
    List<Type> partitionChannelTypes;
    if (partitioningScheme.getHashColumn().isPresent()) {
        partitionChannels = ImmutableList.of(outputLayout.indexOf(partitioningScheme.getHashColumn().get()));
        partitionConstants = ImmutableList.of(Optional.empty());
        partitionChannelTypes = ImmutableList.of(BIGINT);
    }
    else {
        partitionChannels = partitioningScheme.getPartitioning().getArguments().stream()
                .map(argument -> {
                    if (argument.isConstant()) {
                        return -1;
                    }
                    return outputLayout.indexOf(argument.getColumn());
                })
                .collect(toImmutableList());
        partitionConstants = partitioningScheme.getPartitioning().getArguments().stream()
                .map(argument -> {
                    if (argument.isConstant()) {
                        return Optional.of(argument.getConstant());
                    }
                    return Optional.<NullableValue>empty();
                })
                .collect(toImmutableList());
        partitionChannelTypes = partitioningScheme.getPartitioning().getArguments().stream()
                .map(argument -> {
                    if (argument.isConstant()) {
                        return argument.getConstant().getType();
                    }
                    return types.get(argument.getColumn());
                })
                .collect(toImmutableList());
    }
    PartitionFunction partitionFunction = nodePartitioningManager.getPartitionFunction(taskContext.getSession(), partitioningScheme, partitionChannelTypes);
    OptionalInt nullChannel = OptionalInt.empty();
    Set<Symbol> partitioningColumns = partitioningScheme.getPartitioning().getColumns();
    // partitioningColumns expected to have one column in the normal case, and zero columns when partitioning on a constant
    checkArgument(!partitioningScheme.isReplicateNullsAndAny() || partitioningColumns.size() <= 1);
    if (partitioningScheme.isReplicateNullsAndAny() && partitioningColumns.size() == 1) {
        nullChannel = OptionalInt.of(outputLayout.indexOf(getOnlyElement(partitioningColumns)));
    }
    return plan(taskContext, stageExecutionDescriptor, plan, outputLayout, types, partitionedSourceOrder, outputBuffer, new PartitionedOutputFactory(partitionFunction, partitionChannels, partitionConstants, partitioningScheme.isReplicateNullsAndAny(), nullChannel, outputBuffer, maxPagePartitioningBufferSize), feederCTEId, feederCTEParentId, cteCtx);
}
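The three lists built above are parallel: index i describes channel i handed to the partition function. A hedged illustration of how they line up when the partitioning has one column argument and one constant argument (the types and the constant value are invented):

// A constant argument contributes -1 to partitionChannels and a present
// Optional<NullableValue> to partitionConstants at the same index;
// a column argument contributes its output-layout index and Optional.empty().
List<Integer> exampleChannels = ImmutableList.of(0, -1);
List<Optional<NullableValue>> examplePartitionConstants = ImmutableList.of(
        Optional.empty(),
        Optional.of(NullableValue.of(BigintType.BIGINT, 42L)));
List<Type> exampleChannelTypes = ImmutableList.of(VarcharType.VARCHAR, BigintType.BIGINT);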
Use of io.prestosql.spi.predicate.NullableValue in project hetu-core by openlookeng.
In class HiveMetadata, method getPartitionsSystemTable:
private Optional<SystemTable> getPartitionsSystemTable(ConnectorSession session, SchemaTableName tableName, SchemaTableName sourceTableName)
{
    HiveTableHandle sourceTableHandle = getTableHandle(session, sourceTableName);
    if (sourceTableHandle == null) {
        return Optional.empty();
    }
    SchemaTableName schemaTableName = sourceTableHandle.getSchemaTableName();
    Table table = metastore.getTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    List<HiveColumnHandle> partitionColumns = sourceTableHandle.getPartitionColumns();
    if (partitionColumns.isEmpty()) {
        return Optional.empty();
    }
    List<Type> partitionColumnTypes = partitionColumns.stream()
            .map(HiveColumnHandle::getTypeSignature)
            .map(typeManager::getType)
            .collect(toImmutableList());
    List<ColumnMetadata> partitionSystemTableColumns = partitionColumns.stream()
            .map(column -> new ColumnMetadata(column.getName(), typeManager.getType(column.getTypeSignature()), column.getComment().orElse(null), column.isHidden()))
            .collect(toImmutableList());
    Map<Integer, HiveColumnHandle> fieldIdToColumnHandle = IntStream.range(0, partitionColumns.size())
            .boxed()
            .collect(toImmutableMap(identity(), partitionColumns::get));
    return Optional.of(createSystemTable(new ConnectorTableMetadata(tableName, partitionSystemTableColumns), constraint -> {
        TupleDomain<ColumnHandle> targetTupleDomain = constraint.transform(fieldIdToColumnHandle::get);
        Predicate<Map<ColumnHandle, NullableValue>> targetPredicate = convertToPredicate(targetTupleDomain);
        Constraint targetConstraint = new Constraint(targetTupleDomain, targetPredicate);
        Iterable<List<Object>> records = () ->
                stream(partitionManager.getPartitions(metastore, new HiveIdentity(session), sourceTableHandle, targetConstraint, table).getPartitions())
                        .map(hivePartition -> IntStream.range(0, partitionColumns.size())
                                .mapToObj(fieldIdToColumnHandle::get)
                                .map(columnHandle -> hivePartition.getKeys().get(columnHandle).getValue())
                                .collect(toList()))
                        .iterator();
        return new InMemoryRecordSet(partitionColumnTypes, records).cursor();
    }));
}
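convertToPredicate turns the constraint's TupleDomain into a predicate over a partition's key values. Its conventional shape in the Presto/openLooKeng Hive connector is the one-liner below; this is a sketch of the idea rather than a quote of the hetu-core source.

public static Predicate<Map<ColumnHandle, NullableValue>> convertToPredicate(TupleDomain<ColumnHandle> tupleDomain)
{
    // a partition matches when its keys, treated as fixed single values, are contained in the constraint
    return bindings -> tupleDomain.contains(TupleDomain.fromFixedValues(bindings));
}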
Use of io.prestosql.spi.predicate.NullableValue in project hetu-core by openlookeng.
In class HiveMetadata, method buildColumnDomain:
private static Domain buildColumnDomain(ColumnHandle column, List<HivePartition> partitions)
{
    checkArgument(!partitions.isEmpty(), "partitions cannot be empty");
    boolean hasNull = false;
    List<Object> nonNullValues = new ArrayList<>();
    Type type = null;
    for (HivePartition partition : partitions) {
        NullableValue value = partition.getKeys().get(column);
        if (value == null) {
            throw new PrestoException(HiveErrorCode.HIVE_UNKNOWN_ERROR, format("Partition %s does not have a value for partition column %s", partition, column));
        }
        if (value.isNull()) {
            hasNull = true;
        }
        else {
            nonNullValues.add(value.getValue());
        }
        if (type == null) {
            // capture the partition column's type from the first key we see
            type = value.getType();
        }
    }
    if (!nonNullValues.isEmpty()) {
        Domain domain = Domain.multipleValues(type, nonNullValues);
        if (hasNull) {
            return domain.union(Domain.onlyNull(type));
        }
        return domain;
    }
    // every partition key was NULL
    return Domain.onlyNull(type);
}
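A short worked illustration of the three outcomes above, with invented BIGINT partition keys: two non-null keys plus one NULL key produce a multi-value domain unioned with the null-only domain.

// NullableValue.of(...) models a non-null partition key, NullableValue.asNull(...) a NULL key.
List<Object> exampleNonNullValues = ImmutableList.of(
        NullableValue.of(BigintType.BIGINT, 1L).getValue(),
        NullableValue.of(BigintType.BIGINT, 2L).getValue());
Domain exampleDomain = Domain.multipleValues(BigintType.BIGINT, exampleNonNullValues)
        .union(Domain.onlyNull(BigintType.BIGINT));
// had every key been NULL, the result would simply be Domain.onlyNull(BigintType.BIGINT)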
Use of io.prestosql.spi.predicate.NullableValue in project hetu-core by openlookeng.
In class HivePartitionManager, method getPartitions:
public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, HiveIdentity identity, ConnectorTableHandle tableHandle, Constraint constraint, Table table)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    TupleDomain<ColumnHandle> effectivePredicate = constraint.getSummary().intersect(hiveTableHandle.getEnforcedConstraint());
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Optional<HiveBucketHandle> hiveBucketHandle = hiveTableHandle.getBucketHandle();
    List<HiveColumnHandle> partitionColumns = hiveTableHandle.getPartitionColumns();
    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(), none(), none(), none(), hiveBucketHandle, Optional.empty());
    }
    Optional<HiveBucketing.HiveBucketFilter> bucketFilter = HiveBucketing.getHiveBucketFilter(table, effectivePredicate);
    TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate, domainCompactionThreshold);
    if (partitionColumns.isEmpty()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(new HivePartition(tableName)), compactEffectivePredicate, effectivePredicate, all(), hiveBucketHandle, bucketFilter);
    }
    List<Type> partitionTypes = partitionColumns.stream()
            .map(column -> typeManager.getType(column.getTypeSignature()))
            .collect(toList());
    Iterable<HivePartition> partitionsIterable;
    Predicate<Map<ColumnHandle, NullableValue>> predicate = constraint.predicate().orElse(value -> true);
    if (hiveTableHandle.getPartitions().isPresent()) {
        partitionsIterable = hiveTableHandle.getPartitions().get().stream()
                .filter(partition -> partitionMatches(partitionColumns, effectivePredicate, predicate, partition))
                .collect(toImmutableList());
    }
    else {
        List<String> partitionNames = getFilteredPartitionNames(metastore, identity, tableName, partitionColumns, effectivePredicate, table);
        partitionsIterable = () -> partitionNames.stream()
                .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionTypes, effectivePredicate, predicate))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .iterator();
    }
    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.withColumnDomains(Maps.filterKeys(effectivePredicate.getDomains().get(), not(Predicates.in(partitionColumns))));
    TupleDomain<ColumnHandle> enforcedTupleDomain = TupleDomain.withColumnDomains(Maps.filterKeys(effectivePredicate.getDomains().get(), Predicates.in(partitionColumns)));
    return new HivePartitionResult(partitionColumns, partitionsIterable, compactEffectivePredicate, remainingTupleDomain, enforcedTupleDomain, hiveBucketHandle, bucketFilter);
}
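The constraint.predicate() consumed above is a caller-supplied Predicate over a candidate partition's key values. A hedged sketch of such a predicate, where dsColumn is a hypothetical ColumnHandle for a varchar partition column named "ds"; only the NullableValue accessors and Slices.utf8Slice are real API.

// Keep only partitions whose "ds" key is non-null and equal to one target day.
Predicate<Map<ColumnHandle, NullableValue>> examplePredicate = keys -> {
    NullableValue ds = keys.get(dsColumn); // hypothetical handle, for illustration only
    return ds != null
            && !ds.isNull()
            && Slices.utf8Slice("2024-01-01").equals(ds.getValue());
};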