Use of com.apple.foundationdb.record.ScanProperties in project fdb-record-layer by FoundationDB.
The class TextScan, method scan.
@Nonnull
// try-with-resources - the two cursors returned cannot be closed because they are wrapped and returned
@SuppressWarnings("squid:S2095")
private <M extends Message> RecordCursor<IndexEntry> scan(@Nonnull FDBRecordStoreBase<M> store, @Nonnull EvaluationContext context,
                                                          @Nullable Tuple prefix, @Nullable TupleRange suffix,
                                                          @Nonnull Index index, @Nonnull List<String> tokenList,
                                                          @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
    if (tokenList.isEmpty()) {
        return RecordCursor.empty();
    }
    final int prefixEntries = 1 + (prefix != null ? prefix.size() : 0);
    final Comparisons.Type comparisonType = textComparison.getType();
    if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_PREFIX)
            || (tokenList.size() == 1
                && (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_PREFIXES)
                    || comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY_PREFIX)))) {
        if (tokenList.size() != 1) {
            throw new RecordCoreException("text prefix comparison included " + tokenList.size() + " comparands instead of one");
        }
        return scanTokenPrefix(store, tokenList.get(0), prefix, suffix, index, scanProperties).apply(continuation);
    } else if (tokenList.size() == 1) {
        // With a single token, a plain token scan suffices; since every multi-token comparison below
        // collapses to this case when given one token, handling it here is necessary, not just nice to have.
        return scanToken(store, tokenList.get(0), prefix, suffix, index, scanProperties).apply(continuation);
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL)) {
        // Take the intersection of all children. Note that to handle skip and the returned row limit correctly,
        // the skip and limit are both removed and then applied later.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream()
                .map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties))
                .collect(Collectors.toList());
        return IntersectionCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(),
                        intersectionChildren, continuation, store.getTimer())
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_PREFIXES)) {
        final Comparisons.TextContainsAllPrefixesComparison allPrefixesComparison = (Comparisons.TextContainsAllPrefixesComparison) textComparison;
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream()
                .map(token -> scanTokenPrefix(store, token, prefix, suffix, index, childScanProperties))
                .collect(Collectors.toList());
        return ProbableIntersectionCursor.create(suffixComparisonKeyFunction(prefixEntries), intersectionChildren,
                        allPrefixesComparison.getExpectedRecords(), allPrefixesComparison.getFalsePositivePercentage(),
                        continuation, store.getTimer())
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY)) {
        // Take the union of all children. Note that to handle skip and the returned row limit correctly,
        // the skip is removed from the children and applied to the returned cursor. Also, the limit
        // is adjusted upwards and then must be applied again to the returned union.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> unionChildren = tokenList.stream()
                .map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties))
                .collect(Collectors.toList());
        return UnionCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(),
                        unionChildren, continuation, store.getTimer())
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY_PREFIX)) {
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> unionChildren = tokenList.stream()
                .map(token -> scanTokenPrefix(store, token, prefix, suffix, index, childScanProperties))
                .collect(Collectors.toList());
        return UnorderedUnionCursor.create(unionChildren, continuation, store.getTimer())
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else {
        // Apply the filter based on the position lists
        final Function<List<IndexEntry>, Boolean> predicate;
        if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_WITHIN) && textComparison instanceof Comparisons.TextWithMaxDistanceComparison) {
            int maxDistance = ((Comparisons.TextWithMaxDistanceComparison) textComparison).getMaxDistance();
            predicate = entries -> entriesContainAllWithin(entries, maxDistance);
        } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_PHRASE)) {
            List<String> tokensWithStopWords = getTokenList(store, context, false);
            predicate = entries -> entriesContainPhrase(entries, tokensWithStopWords);
        } else {
            throw new RecordCoreException("unsupported comparison type for text query: " + comparisonType);
        }
        // It's either TEXT_CONTAINS_ALL_WITHIN or TEXT_CONTAINS_PHRASE. In either case, we need to scan
        // all tokens, intersect, and then apply a filter on the returned list.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream()
                .map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties))
                .collect(Collectors.toList());
        final RecordCursor<List<IndexEntry>> intersectionCursor = IntersectionMultiCursor.create(
                suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(),
                intersectionChildren, continuation, store.getTimer());
        return intersectionCursor.filterInstrumented(predicate, store.getTimer(), inCounts, duringEvents, successCounts, failureCounts)
                .map(indexEntries -> indexEntries.get(0))
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    }
}
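The recurring ScanProperties idiom above is worth isolating: child cursors must scan with skip and limit cleared (via ExecuteProperties::clearSkipAndLimit or clearSkipAndAdjustLimit), because applying them per child would drop or truncate rows before the intersection or union ever sees them; the caller's skip and limit are then applied exactly once to the combined cursor. A minimal sketch of the intersection variant, where childA, childB, and comparisonKey are hypothetical stand-ins for the per-token scan functions and suffixComparisonKeyFunction(prefixEntries):

// Sketch only: childA and childB are hypothetical Function<byte[], RecordCursor<IndexEntry>>
// children, and comparisonKey stands in for suffixComparisonKeyFunction(prefixEntries).
final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
final RecordCursor<IndexEntry> combined =
        IntersectionCursor.create(comparisonKey, scanProperties.isReverse(),
                        Arrays.asList(childA, childB), continuation, store.getTimer())
                // The children scanned without limits, so skip and limit apply exactly once here.
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());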
Use of com.apple.foundationdb.record.ScanProperties in project fdb-record-layer by FoundationDB.
The class StandardIndexMaintainer, method validateMissingEntries.
/**
* Validate entries in the index. It scans the records and checks if the index entries associated with each record
* exist. Note that it may not work for indexes on synthetic record types (e.g., join indexes).
* @param continuation any continuation from a previous validation invocation
* @param scanProperties skip, limit and other properties of the validation
* @return a cursor over records whose expected index entries are missing, together with the reason
*/
@Nonnull
protected RecordCursor<InvalidIndexEntry> validateMissingEntries(@Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
    final Collection<RecordType> recordTypes = state.store.getRecordMetaData().recordTypesForIndex(state.index);
    final FDBRecordStoreBase.PipelineSizer pipelineSizer = state.store.getPipelineSizer();
    return RecordCursor.flatMapPipelined(
            cont -> state.store.scanRecords(TupleRange.ALL, cont, scanProperties)
                    .filter(rec -> recordTypes.contains(rec.getRecordType())),
            (record, cont) -> {
                List<IndexEntry> filteredIndexEntries = filteredIndexEntries(record);
                return RecordCursor.fromList(filteredIndexEntries == null
                        ? Collections.emptyList()
                        : filteredIndexEntries.stream()
                                .map(indexEntryWithoutPrimaryKey -> new IndexEntry(indexEntryWithoutPrimaryKey.getIndex(),
                                        indexEntryKey(indexEntryWithoutPrimaryKey.getKey(), record.getPrimaryKey()),
                                        indexEntryWithoutPrimaryKey.getValue()))
                                .map(indexEntry -> Pair.of(indexEntry, record))
                                .collect(Collectors.toList()),
                        cont);
            },
            continuation,
            pipelineSizer.getPipelineSize(PipelineOperation.RECORD_FUNCTION))
        .filterAsync(indexEntryRecordPair -> {
            final byte[] keyBytes = state.indexSubspace.pack(indexEntryRecordPair.getLeft().getKey());
            return state.transaction.get(keyBytes).thenApply(Objects::isNull);
        }, pipelineSizer.getPipelineSize(PipelineOperation.INDEX_ASYNC_FILTER))
        .map(indexEntryKeyRecordPair -> InvalidIndexEntry.newMissing(indexEntryKeyRecordPair.getLeft(), indexEntryKeyRecordPair.getRight()));
}
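Because validateMissingEntries honors the skip and limit carried by its ScanProperties, validation can be driven in bounded batches. A hedged sketch of such a driver, assuming validateEntries is the public entry point wrapping this method and that LOGGER is available; a production driver would also reopen the store (and thus the maintainer) per batch so each batch runs in its own transaction:

// Sketch: validate at most 1,000 records per batch, resuming from the previous
// batch's continuation. validateEntries is assumed to delegate to validateMissingEntries.
final ScanProperties validationScan = new ScanProperties(ExecuteProperties.newBuilder()
        .setReturnedRowLimit(1000)
        .setIsolationLevel(IsolationLevel.SNAPSHOT)
        .build());
byte[] continuation = null;
do {
    try (RecordCursor<InvalidIndexEntry> cursor = indexMaintainer.validateEntries(continuation, validationScan)) {
        RecordCursorResult<InvalidIndexEntry> result = cursor.getNext();
        while (result.hasNext()) {
            LOGGER.warn("invalid index entry: {}", result.get());
            result = cursor.getNext();
        }
        continuation = result.getContinuation().toBytes(); // null once the scan is exhausted
    }
} while (continuation != null);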
Use of com.apple.foundationdb.record.ScanProperties in project fdb-record-layer by FoundationDB.
The class AtomicMutationIndexMaintainer, method evaluateAggregateFunction.
@Override
@Nonnull
public CompletableFuture<Tuple> evaluateAggregateFunction(@Nonnull IndexAggregateFunction function, @Nonnull TupleRange range,
                                                          @Nonnull IsolationLevel isolationLevel) {
    if (!matchesAggregateFunction(function)) {
        throw new MetaDataException("this index does not support aggregate function: " + function);
    }
    final RecordCursor<IndexEntry> cursor = scan(IndexScanType.BY_GROUP, range, null,
            new ScanProperties(ExecuteProperties.newBuilder().setIsolationLevel(isolationLevel).build()));
    final BiFunction<Tuple, Tuple, Tuple> aggregator = mutation.getAggregator();
    return cursor.reduce(mutation.getIdentity(), (accum, kv) -> aggregator.apply(accum, kv.getValue()));
}
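The ScanProperties built here carries nothing but the isolation level; skip and limit stay at their defaults because the aggregate must fold every entry in the range. As a concrete illustration, for a SUM-style mutation the reduce amounts to the following sketch (the long-addition identity and aggregator are assumptions standing in for mutation.getIdentity() and mutation.getAggregator()):

// Sketch: folding BY_GROUP entries with long addition, as a SUM index would.
final RecordCursor<IndexEntry> cursor = scan(IndexScanType.BY_GROUP, range, null,
        new ScanProperties(ExecuteProperties.newBuilder().setIsolationLevel(isolationLevel).build()));
final CompletableFuture<Tuple> total = cursor.reduce(Tuple.from(0L), // assumed identity
        (accum, entry) -> Tuple.from(accum.getLong(0) + entry.getValue().getLong(0)));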
Use of com.apple.foundationdb.record.ScanProperties in project fdb-record-layer by FoundationDB.
The class ResolverMappingReplicator, method copyInternal.
private CompletableFuture<Void> copyInternal(@Nonnull final LocatableResolver replica,
                                             @Nonnull final LongAccumulator accumulator,
                                             @Nonnull final AtomicInteger counter) {
    ExecuteProperties executeProperties = ExecuteProperties.newBuilder()
            .setReturnedRowLimit(transactionRowLimit)
            .setTimeLimit(transactionTimeLimitMillis)
            .setIsolationLevel(IsolationLevel.SNAPSHOT)
            .build();
    final AtomicReference<byte[]> continuation = new AtomicReference<>(null);
    return AsyncUtil.whileTrue(() -> {
        final FDBRecordContext context = runner.openContext();
        return primary.getMappingSubspaceAsync().thenCompose(primaryMappingSubspace -> {
            RecordCursor<KeyValue> cursor = KeyValueCursor.Builder.withSubspace(primaryMappingSubspace)
                    .setScanProperties(new ScanProperties(executeProperties))
                    .setContext(context)
                    .setContinuation(continuation.get())
                    .build();
            return cursor.forEachResultAsync(result -> {
                KeyValue kv = result.get();
                final String mappedString = primaryMappingSubspace.unpack(kv.getKey()).getString(0);
                final ResolverResult mappedValue = valueDeserializer.apply(kv.getValue());
                accumulator.accumulate(mappedValue.getValue());
                counter.incrementAndGet();
                return replica.setMapping(context, mappedString, mappedValue);
            }).thenCompose(lastResult -> context.commitAsync().thenRun(() -> {
                byte[] nextContinuationBytes = lastResult.getContinuation().toBytes();
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info(KeyValueLogMessage.of("committing batch",
                            LogMessageKeys.SCANNED_SO_FAR, counter.get(),
                            LogMessageKeys.NEXT_CONTINUATION, ByteArrayUtil2.loggable(nextContinuationBytes)));
                }
                continuation.set(nextContinuationBytes);
            })).whenComplete((vignore, eignore) -> cursor.close())
               .thenApply(vignore -> Objects.nonNull(continuation.get()));
        });
    }, runner.getExecutor());
}
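Here ScanProperties is what keeps each batch within FoundationDB's per-transaction bounds: the returned-row limit caps how many mappings are copied per commit, and the time limit ends the scan well before the five-second transaction limit; whichever fires first, the cursor stops with a resumable continuation that seeds the next transaction's KeyValueCursor. A minimal sketch of building such batch-scoped properties, with illustrative values in place of transactionRowLimit and transactionTimeLimitMillis:

// Sketch: per-transaction scan limits; the numbers are illustrative only.
final ExecuteProperties batchLimits = ExecuteProperties.newBuilder()
        .setReturnedRowLimit(10_000) // cap rows copied per transaction
        .setTimeLimit(4_000L)        // stop scanning before the 5 s transaction limit
        .setIsolationLevel(IsolationLevel.SNAPSHOT)
        .build();
final ScanProperties batchScan = new ScanProperties(batchLimits);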