use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class FDBRecordStore, method loadSyntheticRecord.
/**
 * Load a {@link FDBSyntheticRecord synthetic record} by fetching each of its stored constituent records
 * and assembling the synthetic record from them.
 * @param primaryKey the primary key of the synthetic record, which includes the primary keys of the constituents
 * @return a future which completes to the synthesized record
 */
@Nonnull
@API(API.Status.EXPERIMENTAL)
public CompletableFuture<FDBSyntheticRecord> loadSyntheticRecord(@Nonnull Tuple primaryKey) {
    // The first element of the primary key identifies the synthetic record type.
    final SyntheticRecordType<?> syntheticRecordType = getRecordMetaData().getSyntheticRecordTypeFromRecordTypeKey(primaryKey.get(0));
    final int numConstituents = syntheticRecordType.getConstituents().size();
    // The synthetic primary key is (recordTypeKey, constituent1Key, ..., constituentNKey).
    if (numConstituents != primaryKey.size() - 1) {
        throw recordCoreException("Primary key does not have correct number of nested keys: " + primaryKey);
    }
    // Loads run concurrently, so completed constituents are collected in a concurrent map.
    final Map<String, FDBStoredRecord<? extends Message>> loadedConstituents = new ConcurrentHashMap<>(numConstituents);
    final CompletableFuture<?>[] loadFutures = new CompletableFuture<?>[numConstituents];
    for (int index = 0; index < numConstituents; index++) {
        final SyntheticRecordType.Constituent constituent = syntheticRecordType.getConstituents().get(index);
        final Tuple constituentKey = primaryKey.getNestedTuple(index + 1);
        if (constituentKey == null) {
            // A null nested key means this constituent is absent; there is nothing to load for it.
            loadFutures[index] = AsyncUtil.DONE;
        } else {
            loadFutures[index] = loadRecordAsync(constituentKey).thenApply(loadedRecord -> {
                if (loadedRecord == null) {
                    throw new RecordDoesNotExistException("constituent record not found: " + constituent.getName());
                }
                loadedConstituents.put(constituent.getName(), loadedRecord);
                return null;
            });
        }
    }
    // Once every constituent has loaded, synthesize the record from the collected parts.
    return CompletableFuture.allOf(loadFutures)
            .thenApply(ignored -> FDBSyntheticRecord.of(syntheticRecordType, loadedConstituents));
}
use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class ImplementDistinctUnionRule, method onMatch.
@Override
@SuppressWarnings("java:S135")
public void onMatch(@Nonnull PlannerRuleCall call) {
final PlanContext context = call.getContext();
// Bail out unless downstream consumers have communicated the orderings they are interested in.
final Optional<Set<RequestedOrdering>> requiredOrderingsOptional = call.getInterestingProperty(OrderingAttribute.ORDERING);
if (requiredOrderingsOptional.isEmpty()) {
return;
}
final Set<RequestedOrdering> requestedOrderings = requiredOrderingsOptional.get();
// A distinct union needs a comparison key derived from a primary key common to all legs;
// without one we cannot implement the union here.
final KeyExpression commonPrimaryKey = context.getCommonPrimaryKey();
if (commonPrimaryKey == null) {
return;
}
final List<KeyExpression> commonPrimaryKeyParts = commonPrimaryKey.normalizeKeyForPositions();
final PlannerBindings bindings = call.getBindings();
final Quantifier.ForEach unionForEachQuantifier = bindings.get(unionForEachQuantifierMatcher);
final List<? extends Collection<? extends RecordQueryPlan>> plansByQuantifier = bindings.getAll(unionLegPlansMatcher);
// group each leg's plans by their provided ordering
final ImmutableList<Set<Map.Entry<Ordering, ImmutableList<RecordQueryPlan>>>> plansByQuantifierOrdering = plansByQuantifier.stream().map(plansForQuantifier -> {
final Map<Ordering, ImmutableList<RecordQueryPlan>> groupedBySortedness = plansForQuantifier.stream().flatMap(plan -> {
// Plans for which no ordering can be evaluated are dropped from consideration.
final Optional<Ordering> orderingForLegOptional = OrderingProperty.evaluate(plan, context);
return orderingForLegOptional.stream().map(ordering -> Pair.of(ordering, plan));
}).collect(Collectors.groupingBy(Pair::getLeft, Collectors.mapping(Pair::getRight, ImmutableList.toImmutableList())));
return groupedBySortedness.entrySet();
}).collect(ImmutableList.toImmutableList());
// Enumerate every combination that picks one ordering group per leg.
for (final List<Map.Entry<Ordering, ImmutableList<RecordQueryPlan>>> entries : CrossProduct.crossProduct(plansByQuantifierOrdering)) {
final ImmutableList<Optional<Ordering>> orderingOptionals = entries.stream().map(entry -> Optional.of(entry.getKey())).collect(ImmutableList.toImmutableList());
for (final RequestedOrdering requestedOrdering : requestedOrderings) {
// Try to derive a single ordering for the union from the per-leg orderings.
final Optional<Ordering> combinedOrderingOptional = OrderingProperty.deriveForUnionFromOrderings(orderingOptionals, requestedOrdering, Ordering::intersectEqualityBoundKeys);
// Push the interesting orders down regardless of whether this combination succeeds,
// so exploration below the union can produce compatibly ordered plans later.
pushInterestingOrders(call, unionForEachQuantifier, orderingOptionals, requestedOrdering);
if (combinedOrderingOptional.isEmpty()) {
// The leg orderings cannot be combined into one satisfying this request; try the next request.
continue;
}
final Ordering ordering = combinedOrderingOptional.get();
final Set<KeyExpression> equalityBoundKeys = ordering.getEqualityBoundKeys();
final List<KeyPart> orderingKeyParts = ordering.getOrderingKeyParts();
final List<KeyExpression> orderingKeys = orderingKeyParts.stream().map(KeyPart::getNormalizedKeyExpression).collect(ImmutableList.toImmutableList());
// make sure the common primary key parts are either bound through equality or they are part of the ordering
if (!isPrimaryKeyCompatibleWithOrdering(commonPrimaryKeyParts, orderingKeys, equalityBoundKeys)) {
continue;
}
//
// At this point we know we can implement the distinct union over the partitions of compatibly ordered plans
//
final KeyExpression comparisonKey = orderingKeys.size() == 1 ? Iterables.getOnlyElement(orderingKeys) : Key.Expressions.concat(orderingKeys);
//
// create new references
//
final ImmutableList<Quantifier.Physical> newQuantifiers = entries.stream().map(Map.Entry::getValue).map(GroupExpressionRef::from).map(Quantifier::physical).collect(ImmutableList.toImmutableList());
// Yield the physical distinct union plan (last argument marks the union as distinct).
call.yield(call.ref(RecordQueryUnionPlan.fromQuantifiers(newQuantifiers, comparisonKey, true)));
}
}
}
use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class AbstractDataAccessRule, method onMatch.
/**
 * Method that does the leg work to create the appropriate expression dag for data access using value indexes or
 * value index-like scans (primary scans).
 *
 * Conceptually we do the following work:
 *
 * <ul>
 * <li> This method yields a scan plan for each matching primary candidate ({@link PrimaryScanMatchCandidate}).
 * There is only ever going to be exactly one {@link PrimaryScanMatchCandidate} for a primary key. Due to the
 * candidate being solely based upon a primary key, the match structure is somewhat limited. In essence, there
 * is an implicit guarantee that we can always create a primary scan for a data source.
 * </li>
 * <li> This method yields an index scan plan for each matching value index candidate
 * ({@link ValueIndexScanMatchCandidate}).
 * </li>
 * <li> This method yields the combinatorial expansion of intersections of distinct-ed index scan plans.
 * </li>
 * </ul>
 *
 * The work described above is semantically correct in a sense that it creates a search space that can be explored
 * and pruned in suitable ways that will eventually converge into an optimal data access plan.
 *
 * We can choose to create an index scan for every index that is available regardless what the coverage
 * of an index is. The coverage of an index is a measurement that tells us how well an index can answer what a
 * filter (or by extension a query) asks for. For instance, a high number of search arguments used in the index scan
 * can be associated with high coverage (as in the index scan covers more of the query) and vice versa.
 *
 * Similarly, we can choose to create the intersection of all possible combinations of suitable scans over indexes
 * (that we have matches for). Since we create a logical intersection of these access plans we can leave it up to
 * the respective implementation rules (e.g., {@link ImplementIntersectionRule}) to do the right thing and implement
 * the physical plan for the intersection if possible (e.g. ensuring compatibly ordered legs, etc.).
 *
 * In fact, the two before-mentioned approaches are completely valid with respect to correctness of the plan and
 * the guaranteed creation of the optimal plan. However, in reality using this approach, although valid and probably
 * the conceptually better and more orthogonal approach, will result in a ballooning of the search space very quickly.
 * While that may be acceptable for group-by engines and only few index access paths, in an OLTP world where there
 * are potentially dozens of indexes, memory footprint and the sheer number of tasks that would be created for
 * subsequent exploration and implementation of all these alternatives make the purist approach to planning these
 * indexes infeasible.
 *
 * Thus we would like to eliminate unnecessary exploration by avoiding variations we know can never be successful
 * either in creating a successful executable plan (e.g. logical expression may not ever be able to produce a
 * compatible ordering) or cannot ever create an optimal plan. In a nutshell, we try to utilize additional
 * information that is available in addition to the matching partition in order to make decisions about which
 * expression variation to create and which to avoid:
 *
 * <ul>
 * <li> For a matching primary scan candidate ({@link PrimaryScanMatchCandidate})
 * we will not create a primary scan if the scan is incompatible with an interesting order that has been
 * communicated downwards in the graph.
 * </li>
 * <li> For a matching index scan candidate ({@link ValueIndexScanMatchCandidate})
 * we will not create an index scan if the scan is incompatible with an interesting order that has been
 * communicated downwards in the graph.
 * </li>
 * <li> We will only create a scan if there is no other index scan with a greater coverage (think of coverage
 * as the assumed amount of filtering or currently the number of bound predicates) for the search arguments
 * which are bound by the query.
 * For instance, an index scan {@code INDEX SCAN(i1, a = [5, 5], b = [10, 10])} is still planned along
 * {@code INDEX SCAN(i2, x = ["hello", "hello"], y = ["world", "world"], z = [10, inf])} even though
 * the latter utilizes three search arguments while the former one only uses two. However, an index scan
 * {@code INDEX SCAN(i1, a = [5, 5], b = [10, 10])} is not created (and yielded) if we also
 * have a choice to plan {@code INDEX SCAN(i2, b = [10, 10], a = [5, 5], c = ["Guten", "Morgen"])} as that
 * index {@code i2} has a higher coverage compared to {@code i1} <em>and</em> all bound arguments in the scan
 * over {@code i2} are also bound in the scan over {@code i1}.</li>
 * <li>
 * We will only create intersections of scans if we can already establish that the logical intersection
 * can be implemented by a {@link com.apple.foundationdb.record.query.plan.plans.RecordQueryIntersectionPlan}.
 * That requires that the legs of the intersection are compatibly ordered <em>and</em> that that ordering follows
 * a potentially required ordering.
 * </li>
 * </ul>
 *
 * @param call the call associated with this planner rule execution
 */
@Override
@SuppressWarnings("java:S135")
public void onMatch(@Nonnull PlannerRuleCall call) {
final PlannerBindings bindings = call.getBindings();
final List<? extends PartialMatch> completeMatches = bindings.getAll(getCompleteMatchMatcher());
final R expression = bindings.get(getExpressionMatcher());
// no complete matches means there is nothing this rule can plan
if (completeMatches.isEmpty()) {
return;
}
//
// return if there is no pre-determined interesting ordering
//
final Optional<Set<RequestedOrdering>> requestedOrderingsOptional = call.getInterestingProperty(OrderingAttribute.ORDERING);
if (requestedOrderingsOptional.isEmpty()) {
return;
}
final Set<RequestedOrdering> requestedOrderings = requestedOrderingsOptional.get();
//
// group matches by candidates
//
final LinkedHashMap<MatchCandidate, ? extends ImmutableList<? extends PartialMatch>> completeMatchMap = completeMatches.stream().collect(Collectors.groupingBy(PartialMatch::getMatchCandidate, LinkedHashMap::new, ImmutableList.toImmutableList()));
// find the best match for a candidate as there may be more than one due to partial matching
final ImmutableSet<PartialMatch> maximumCoverageMatchPerCandidate = completeMatchMap.entrySet().stream().flatMap(entry -> {
final List<? extends PartialMatch> completeMatchesForCandidate = entry.getValue();
// "best" here means the match that binds the longest parameter prefix
final Optional<? extends PartialMatch> bestMatchForCandidateOptional = completeMatchesForCandidate.stream().max(Comparator.comparing(PartialMatch::getNumBoundParameterPrefix));
return bestMatchForCandidateOptional.map(Stream::of).orElse(Stream.empty());
}).collect(ImmutableSet.toImmutableSet());
// prune matches that are subsumed by other matches of higher coverage (see javadoc above)
final List<PartialMatch> bestMaximumCoverageMatches = maximumCoverageMatches(maximumCoverageMatchPerCandidate, requestedOrderings);
if (bestMaximumCoverageMatches.isEmpty()) {
return;
}
// create scans for all best matches
final Map<PartialMatch, RelationalExpression> bestMatchToExpressionMap = createScansForMatches(bestMaximumCoverageMatches);
final ExpressionRef<RelationalExpression> toBeInjectedReference = GroupExpressionRef.empty();
// create single scan accesses
for (final PartialMatch bestMatch : bestMaximumCoverageMatches) {
final RelationalExpression dataAccessAndCompensationExpression = compensateSingleDataAccess(bestMatch, bestMatchToExpressionMap.get(bestMatch));
toBeInjectedReference.insert(dataAccessAndCompensationExpression);
}
// distinct-ed versions of the scans are the legs used for intersections below
final Map<PartialMatch, RelationalExpression> bestMatchToDistinctExpressionMap = distinctMatchToScanMap(bestMatchToExpressionMap);
@Nullable final KeyExpression commonPrimaryKey = call.getContext().getCommonPrimaryKey();
if (commonPrimaryKey != null) {
final var commonPrimaryKeyParts = commonPrimaryKey.normalizeKeyForPositions();
final var boundPartitions = Lists.<List<PartialMatch>>newArrayList();
// create intersections for all n choose k partitions from k = 2 .. n
IntStream.range(2, bestMaximumCoverageMatches.size() + 1).mapToObj(k -> ChooseK.chooseK(bestMaximumCoverageMatches, k)).flatMap(iterable -> StreamSupport.stream(iterable.spliterator(), false)).forEach(boundPartitions::add);
boundPartitions.stream().flatMap(partition -> createIntersectionAndCompensation(commonPrimaryKeyParts, bestMatchToDistinctExpressionMap, partition, requestedOrderings).stream()).forEach(toBeInjectedReference::insert);
}
// yield the original expression with all single accesses and intersections injected
call.yield(inject(expression, completeMatches, toBeInjectedReference));
}
use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class AbstractDataAccessRule, method createIntersectionAndCompensation.
/**
 * Private helper method to plan an intersection and subsequently compensate it using the partial match structures
 * kept for all participating data accesses.
 * Planning the data access and its compensation for a given match is a two-step approach as we compute
 * the compensation for intersections by intersecting the {@link Compensation} for the single data accesses first
 * before using the resulting {@link Compensation} to compute the compensating expression for the entire
 * intersection.
 * @param commonPrimaryKeyParts normalized common primary key
 * @param matchToExpressionMap a map from match to single data access expression
 * @param partition a partition (i.e. a list of {@link PartialMatch}es) that the caller would like to compute
 * an intersected data access for
 * @param requestedOrderings a set of orderings that have been requested by consuming expressions/plan operators
 * @return a list of new {@link RelationalExpression}s, one for each requested ordering that the intersection can
 * satisfy, each representing the intersected data access and its compensation; the list is empty if this
 * method was unable to compute any intersection expression
 */
@Nonnull
private static List<RelationalExpression> createIntersectionAndCompensation(@Nonnull final List<KeyExpression> commonPrimaryKeyParts, @Nonnull final Map<PartialMatch, RelationalExpression> matchToExpressionMap, @Nonnull final List<PartialMatch> partition, @Nonnull final Set<RequestedOrdering> requestedOrderings) {
final var expressionsBuilder = ImmutableList.<RelationalExpression>builder();
// partial order combining the orderings of all legs of the partition
final var orderingPartialOrder = intersectionOrdering(partition);
// key parts bound through equality in any leg; these need not appear in the comparison key
final ImmutableSet<BoundKeyPart> equalityBoundKeyParts = partition.stream().map(partialMatch -> partialMatch.getMatchInfo().getBoundKeyParts()).flatMap(boundOrderingKeyParts -> boundOrderingKeyParts.stream().filter(boundOrderingKey -> boundOrderingKey.getComparisonRangeType() == ComparisonRange.Type.EQUALITY)).collect(ImmutableSet.toImmutableSet());
for (final var requestedOrdering : requestedOrderings) {
// check whether the combined ordering of the legs can satisfy this requested ordering
final var satisfyingOrderingPartsOptional = Ordering.satisfiesKeyPartsOrdering(orderingPartialOrder, requestedOrdering.getOrderingKeyParts(), BoundKeyPart::getKeyPart);
final var comparisonKeyOptional = satisfyingOrderingPartsOptional.map(parts -> parts.stream().filter(part -> !equalityBoundKeyParts.contains(part)).collect(ImmutableList.toImmutableList())).flatMap(parts -> comparisonKey(commonPrimaryKeyParts, equalityBoundKeyParts, parts));
if (comparisonKeyOptional.isEmpty()) {
// no comparison key can be formed for this requested ordering; try the next one
continue;
}
final KeyExpression comparisonKey = comparisonKeyOptional.get();
// intersect the compensations of all legs before applying the result to the whole intersection
final var compensation = partition.stream().map(partialMatch -> partialMatch.compensate(partialMatch.getBoundParameterPrefixMap())).reduce(Compensation.impossibleCompensation(), Compensation::intersect);
final ImmutableList<RelationalExpression> scans = partition.stream().map(partialMatch -> Objects.requireNonNull(matchToExpressionMap.get(partialMatch))).collect(ImmutableList.toImmutableList());
final var logicalIntersectionExpression = LogicalIntersectionExpression.from(scans, comparisonKey);
final var compensatedIntersection = compensation.isNeeded() ? compensation.apply(GroupExpressionRef.of(logicalIntersectionExpression)) : logicalIntersectionExpression;
expressionsBuilder.add(compensatedIntersection);
}
return expressionsBuilder.build();
}
Aggregations