Use of com.apple.foundationdb.record.ExecuteProperties in project fdb-record-layer by FoundationDB.
From the class IndexingScrubDangling, the method scrubIndexRangeOnly.
@Nonnull
private CompletableFuture<Boolean> scrubIndexRangeOnly(@Nonnull FDBRecordStore store, byte[] startBytes, byte[] endBytes, @Nonnull AtomicLong recordsScanned) {
// return false when done
Index index = common.getIndex();
final RecordMetaData metaData = store.getRecordMetaData();
final RecordMetaDataProvider recordMetaDataProvider = common.getRecordStoreBuilder().getMetaDataProvider();
if (recordMetaDataProvider == null || !metaData.equals(recordMetaDataProvider.getRecordMetaData())) {
throw new MetaDataException("Store does not have the same metadata");
}
final IndexMaintainer maintainer = store.getIndexMaintainer(index);
// scrubbing only readable, VALUE, idempotent indexes (at least for now)
validateOrThrowEx(maintainer.isIdempotent(), "scrubbed index is not idempotent");
validateOrThrowEx(index.getType().equals(IndexTypes.VALUE) || scrubbingPolicy.ignoreIndexTypeCheck(), "scrubbed index is not a VALUE index");
validateOrThrowEx(store.getIndexState(index) == IndexState.READABLE, "scrubbed index is not readable");
RangeSet rangeSet = new RangeSet(indexScrubIndexRangeSubspace(store, index));
AsyncIterator<Range> ranges = rangeSet.missingRanges(store.ensureContextActive(), startBytes, endBytes).iterator();
// always respectLimit in this path; +1 allows a continuation item
final ExecuteProperties.Builder executeProperties = ExecuteProperties.newBuilder()
        .setIsolationLevel(IsolationLevel.SNAPSHOT)
        .setReturnedRowLimit(getLimit() + 1);
final ScanProperties scanProperties = new ScanProperties(executeProperties.build());
return ranges.onHasNext().thenCompose(hasNext -> {
if (Boolean.FALSE.equals(hasNext)) {
// Here: no more missing ranges - all done
// To avoid stale metadata, we'll keep the scrubbed-ranges indicator empty until the next scrub call.
Transaction tr = store.getContext().ensureActive();
tr.clear(indexScrubIndexRangeSubspace(store, index).range());
return AsyncUtil.READY_FALSE;
}
final Range range = ranges.next();
final Tuple rangeStart = RangeSet.isFirstKey(range.begin) ? null : Tuple.fromBytes(range.begin);
final Tuple rangeEnd = RangeSet.isFinalKey(range.end) ? null : Tuple.fromBytes(range.end);
final TupleRange tupleRange = TupleRange.between(rangeStart, rangeEnd);
RecordCursor<FDBIndexedRecord<Message>> cursor = store.scanIndexRecords(index.getName(), IndexScanType.BY_VALUE, tupleRange, null, IndexOrphanBehavior.RETURN, scanProperties);
final AtomicBoolean hasMore = new AtomicBoolean(true);
final AtomicReference<RecordCursorResult<FDBIndexedRecord<Message>>> lastResult = new AtomicReference<>(RecordCursorResult.exhausted());
final long scanLimit = scrubbingPolicy.getEntriesScanLimit();
return iterateRangeOnly(store, cursor, this::deleteIndexIfDangling, lastResult, hasMore, recordsScanned, true)
        .thenApply(vignore -> hasMore.get() ? lastResult.get().get().getIndexEntry().getKey() : rangeEnd)
        .thenCompose(cont -> rangeSet.insertRange(store.ensureContextActive(), packOrNull(rangeStart), packOrNull(cont), true)
                .thenApply(ignore -> {
if (scanLimit > 0) {
scanCounter += recordsScanned.get();
if (scanLimit <= scanCounter) {
return false;
}
}
return !Objects.equals(cont, rangeEnd);
}));
});
}
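A minimal sketch of the ExecuteProperties and ScanProperties construction used above, assuming only a page size is known; the class and method names here are hypothetical. Requesting one row beyond the page size lets the last entry serve as the continuation point for the next range.
import com.apple.foundationdb.record.ExecuteProperties;
import com.apple.foundationdb.record.IsolationLevel;
import com.apple.foundationdb.record.ScanProperties;

final class ScrubScanPropertiesExample {
    private ScrubScanPropertiesExample() {
    }

    // Build snapshot scan properties that return pageSize + 1 rows: the extra row is the
    // continuation item, mirroring getLimit() + 1 in scrubIndexRangeOnly above.
    static ScanProperties snapshotScanProperties(int pageSize) {
        final ExecuteProperties executeProperties = ExecuteProperties.newBuilder()
                .setIsolationLevel(IsolationLevel.SNAPSHOT)
                .setReturnedRowLimit(pageSize + 1)
                .build();
        return new ScanProperties(executeProperties);
    }
}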
Use of com.apple.foundationdb.record.ExecuteProperties in project fdb-record-layer by FoundationDB.
From the class TextScan, the method scan.
@Nonnull
// try-with-resources - the two cursors returned cannot be closed because they are wrapped and returned
@SuppressWarnings("squid:S2095")
private <M extends Message> RecordCursor<IndexEntry> scan(@Nonnull FDBRecordStoreBase<M> store, @Nonnull EvaluationContext context, @Nullable Tuple prefix, @Nullable TupleRange suffix, @Nonnull Index index, @Nonnull List<String> tokenList, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
if (tokenList.isEmpty()) {
return RecordCursor.empty();
}
final int prefixEntries = 1 + (prefix != null ? prefix.size() : 0);
final Comparisons.Type comparisonType = textComparison.getType();
if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_PREFIX)
        || (tokenList.size() == 1
            && (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_PREFIXES)
                || comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY_PREFIX)))) {
if (tokenList.size() != 1) {
throw new RecordCoreException("text prefix comparison included " + tokenList.size() + " comparands instead of one");
}
return scanTokenPrefix(store, tokenList.get(0), prefix, suffix, index, scanProperties).apply(continuation);
} else if (tokenList.size() == 1) {
// With a single (non-prefix) token, the remaining comparison types reduce to a plain scan of that token.
return scanToken(store, tokenList.get(0), prefix, suffix, index, scanProperties).apply(continuation);
} else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL)) {
// Take the intersection of all children. Note that to handle skip and the returned row limit correctly,
// the skip and limit are both removed and then applied later.
final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream()
        .map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties))
        .collect(Collectors.toList());
return IntersectionCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(), intersectionChildren, continuation, store.getTimer())
        .skip(scanProperties.getExecuteProperties().getSkip())
        .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
} else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_PREFIXES)) {
final Comparisons.TextContainsAllPrefixesComparison allPrefixesComparison = (Comparisons.TextContainsAllPrefixesComparison) textComparison;
final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream()
        .map(token -> scanTokenPrefix(store, token, prefix, suffix, index, childScanProperties))
        .collect(Collectors.toList());
return ProbableIntersectionCursor.create(suffixComparisonKeyFunction(prefixEntries), intersectionChildren, allPrefixesComparison.getExpectedRecords(), allPrefixesComparison.getFalsePositivePercentage(), continuation, store.getTimer())
        .skip(scanProperties.getExecuteProperties().getSkip())
        .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
} else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY)) {
// Take the union of all children. Note that to handle skip and the returned row limit correctly,
// the skip is removed from the children and applied to the returned cursor. Also, the limit
// is adjusted upwards and then must be applied again to returned union.
final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
List<Function<byte[], RecordCursor<IndexEntry>>> unionChildren = tokenList.stream()
        .map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties))
        .collect(Collectors.toList());
return UnionCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(), unionChildren, continuation, store.getTimer())
        .skip(scanProperties.getExecuteProperties().getSkip())
        .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
} else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY_PREFIX)) {
final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
List<Function<byte[], RecordCursor<IndexEntry>>> unionChildren = tokenList.stream()
        .map(token -> scanTokenPrefix(store, token, prefix, suffix, index, childScanProperties))
        .collect(Collectors.toList());
return UnorderedUnionCursor.create(unionChildren, continuation, store.getTimer())
        .skip(scanProperties.getExecuteProperties().getSkip())
        .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
} else {
// Apply the filter based on the position lists
final Function<List<IndexEntry>, Boolean> predicate;
if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_WITHIN) && textComparison instanceof Comparisons.TextWithMaxDistanceComparison) {
int maxDistance = ((Comparisons.TextWithMaxDistanceComparison) textComparison).getMaxDistance();
predicate = entries -> entriesContainAllWithin(entries, maxDistance);
} else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_PHRASE)) {
List<String> tokensWithStopWords = getTokenList(store, context, false);
predicate = entries -> entriesContainPhrase(entries, tokensWithStopWords);
} else {
throw new RecordCoreException("unsupported comparison type for text query: " + comparisonType);
}
// It's either TEXT_CONTAINS_ALL_WITHIN or TEXT_CONTAINS_PHRASE. In either case, we need to scan
// all tokens, intersect, and then apply a filter on the returned list.
final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream()
        .map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties))
        .collect(Collectors.toList());
final RecordCursor<List<IndexEntry>> intersectionCursor = IntersectionMultiCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(), intersectionChildren, continuation, store.getTimer());
return intersectionCursor
        .filterInstrumented(predicate, store.getTimer(), inCounts, duringEvents, successCounts, failureCounts)
        .map(indexEntries -> indexEntries.get(0))
        .skip(scanProperties.getExecuteProperties().getSkip())
        .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
}
}
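All of the combining branches above share one pattern: skip and row limit are stripped (or the limit adjusted upward) before the per-token scans run, and then applied once to the combined cursor. A minimal sketch of the two child-property derivations, assuming a caller-supplied ScanProperties; the class and method names are hypothetical.
import com.apple.foundationdb.record.ExecuteProperties;
import com.apple.foundationdb.record.ScanProperties;

final class ChildScanPropertiesExample {
    private ChildScanPropertiesExample() {
    }

    // Intersections drop both skip and limit: an entry a child returns may still be discarded
    // when it does not appear in every other child, so a per-child limit could starve the intersection.
    static ScanProperties forIntersection(ScanProperties scanProperties) {
        return scanProperties.with(ExecuteProperties::clearSkipAndLimit);
    }

    // Unions drop the skip but keep an upward-adjusted limit: the union never needs more than
    // skip + limit entries from any single child.
    static ScanProperties forUnion(ScanProperties scanProperties) {
        return scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
    }
}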
Use of com.apple.foundationdb.record.ExecuteProperties in project fdb-record-layer by FoundationDB.
From the class ComposedBitmapIndexQueryPlan, the method executePlan.
@Nonnull
@Override
public <M extends Message> RecordCursor<QueryResult> executePlan(@Nonnull final FDBRecordStoreBase<M> store, @Nonnull final EvaluationContext context, @Nullable final byte[] continuation, @Nonnull final ExecuteProperties executeProperties) {
final ExecuteProperties scanExecuteProperties = executeProperties.getSkip() > 0 ? executeProperties.clearSkipAndAdjustLimit() : executeProperties;
final List<Function<byte[], RecordCursor<IndexEntry>>> cursorFunctions = indexPlans.stream()
        .map(RecordQueryCoveringIndexPlan::getIndexPlan)
        .map(scan -> (Function<byte[], RecordCursor<IndexEntry>>) childContinuation ->
                scan.executeEntries(store, context, childContinuation, scanExecuteProperties))
        .collect(Collectors.toList());
return ComposedBitmapIndexCursor.create(cursorFunctions, composer, continuation, store.getTimer())
        .filter(indexEntry -> indexEntry.getValue().get(0) != null)
        .map(indexPlans.get(0).indexEntryToQueriedRecord(store))
        .map(QueryResult::of);
}
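A small illustration of what clearSkipAndAdjustLimit does to the properties handed to the child index scans. The concrete numbers, the builder's setSkip call, and the exact adjusted value are assumptions here; they reflect the "limit is adjusted upwards" note in the TextScan union branches above.
import com.apple.foundationdb.record.ExecuteProperties;

final class ClearSkipAndAdjustLimitExample {
    private ClearSkipAndAdjustLimitExample() {
    }

    static ExecuteProperties adjustForChildScans() {
        // Hypothetical values: skip the first 10 rows and return at most 50.
        final ExecuteProperties original = ExecuteProperties.newBuilder()
                .setSkip(10)
                .setReturnedRowLimit(50)
                .build();
        // The skip is removed so each child scan starts from the beginning, and the limit is
        // raised (assumed here to become 60) so the children still produce enough entries
        // for the parent to skip and limit afterwards.
        return original.clearSkipAndAdjustLimit();
    }
}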
Use of com.apple.foundationdb.record.ExecuteProperties in project fdb-record-layer by FoundationDB.
From the class RecordQueryUnionPlanBase, the method executePlan.
@Nonnull
@Override
public <M extends Message> RecordCursor<QueryResult> executePlan(@Nonnull final FDBRecordStoreBase<M> store, @Nonnull final EvaluationContext context, @Nullable final byte[] continuation, @Nonnull final ExecuteProperties executeProperties) {
final ExecuteProperties childExecuteProperties;
// Can pass the limit down to all sides, since that is the most we'll take total.
if (executeProperties.getSkip() > 0) {
childExecuteProperties = executeProperties.clearSkipAndAdjustLimit();
} else {
childExecuteProperties = executeProperties;
}
final List<Function<byte[], RecordCursor<FDBQueriedRecord<M>>>> childCursorFunctions = getChildStream()
        .map(childPlan -> (Function<byte[], RecordCursor<FDBQueriedRecord<M>>>) ((byte[] childContinuation) ->
                childPlan.executePlan(store, context, childContinuation, childExecuteProperties)
                        .map(result -> result.getQueriedRecord(0))))
        .collect(Collectors.toList());
return createUnionCursor(store, childCursorFunctions, continuation)
        .skipThenLimit(executeProperties.getSkip(), executeProperties.getReturnedRowLimit())
        .map(QueryResult::of);
}
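A minimal sketch of the outer reapplication, assuming an already-created union cursor; the helper name is hypothetical. skipThenLimit applies the original skip and returned-row limit once, after the children have run with the skip removed (and the limit adjusted to compensate) whenever a skip was requested.
import com.apple.foundationdb.record.ExecuteProperties;
import com.apple.foundationdb.record.RecordCursor;

final class OuterSkipLimitExample {
    private OuterSkipLimitExample() {
    }

    // Apply the caller's skip and returned-row limit exactly once, on the combined cursor.
    static <T> RecordCursor<T> applyOuterSkipAndLimit(RecordCursor<T> unionCursor, ExecuteProperties executeProperties) {
        return unionCursor.skipThenLimit(executeProperties.getSkip(), executeProperties.getReturnedRowLimit());
    }
}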
Use of com.apple.foundationdb.record.ExecuteProperties in project fdb-record-layer by FoundationDB.
From the class ResolverMappingReplicator, the method copyInternal.
private CompletableFuture<Void> copyInternal(@Nonnull final LocatableResolver replica, @Nonnull final LongAccumulator accumulator, @Nonnull final AtomicInteger counter) {
ExecuteProperties executeProperties = ExecuteProperties.newBuilder()
        .setReturnedRowLimit(transactionRowLimit)
        .setTimeLimit(transactionTimeLimitMillis)
        .setIsolationLevel(IsolationLevel.SNAPSHOT)
        .build();
final AtomicReference<byte[]> continuation = new AtomicReference<>(null);
return AsyncUtil.whileTrue(() -> {
final FDBRecordContext context = runner.openContext();
return primary.getMappingSubspaceAsync().thenCompose(primaryMappingSubspace -> {
RecordCursor<KeyValue> cursor = KeyValueCursor.Builder.withSubspace(primaryMappingSubspace)
        .setScanProperties(new ScanProperties(executeProperties))
        .setContext(context)
        .setContinuation(continuation.get())
        .build();
return cursor.forEachResultAsync(result -> {
KeyValue kv = result.get();
final String mappedString = primaryMappingSubspace.unpack(kv.getKey()).getString(0);
final ResolverResult mappedValue = valueDeserializer.apply(kv.getValue());
accumulator.accumulate(mappedValue.getValue());
counter.incrementAndGet();
return replica.setMapping(context, mappedString, mappedValue);
}).thenCompose(lastResult -> context.commitAsync().thenRun(() -> {
byte[] nextContinuationBytes = lastResult.getContinuation().toBytes();
if (LOGGER.isInfoEnabled()) {
LOGGER.info(KeyValueLogMessage.of("committing batch",
        LogMessageKeys.SCANNED_SO_FAR, counter.get(),
        LogMessageKeys.NEXT_CONTINUATION, ByteArrayUtil2.loggable(nextContinuationBytes)));
}
continuation.set(nextContinuationBytes);
})).whenComplete((vignore, eignore) -> cursor.close()).thenApply(vignore -> Objects.nonNull(continuation.get()));
});
}, runner.getExecutor());
}
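Unlike the scrubber's properties earlier, this builder also sets a time limit, so each batch commits before it runs too long as well as before it scans too many rows. A minimal sketch of the same construction, with hypothetical limit values; the class and method names are not from the project.
import com.apple.foundationdb.record.ExecuteProperties;
import com.apple.foundationdb.record.IsolationLevel;
import com.apple.foundationdb.record.ScanProperties;

final class BatchScanPropertiesExample {
    private BatchScanPropertiesExample() {
    }

    // Bound each copy batch by both row count and elapsed time; whichever is hit first ends
    // the batch, and the cursor's continuation is used to resume in the next transaction.
    static ScanProperties batchScanProperties(int rowLimit, long timeLimitMillis) {
        final ExecuteProperties executeProperties = ExecuteProperties.newBuilder()
                .setReturnedRowLimit(rowLimit)
                .setTimeLimit(timeLimitMillis)
                .setIsolationLevel(IsolationLevel.SNAPSHOT)
                .build();
        return new ScanProperties(executeProperties);
    }
}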