Use of com.apple.foundationdb.record.IndexEntry in the fdb-record-layer project by FoundationDB.
From the class LuceneIndexQueryPlan, method executePlan:
/**
 * Overridden to supply the specialized mapping needed for Lucene auto-complete and
 * spell-check scans, where each raw index entry is turned into a covering partial
 * record before being wrapped as a {@link QueryResult}.
 */
@Nonnull
@Override
public <M extends Message> RecordCursor<QueryResult> executePlan(@Nonnull FDBRecordStoreBase<M> store, @Nonnull EvaluationContext context, @Nullable byte[] continuation, @Nonnull ExecuteProperties executeProperties) {
    final RecordMetaData recordMetaData = store.getRecordMetaData();
    final Index luceneIndex = recordMetaData.getIndex(indexName);
    final Collection<RecordType> typesForIndex = recordMetaData.recordTypesForIndex(luceneIndex);
    // A Lucene index is only ever defined over a single record type.
    if (typesForIndex.size() != 1) {
        throw new RecordCoreException("No lucene index should span multiple record types");
    }
    final IndexScanType type = getScanType();
    final boolean suggestionScan = type == IndexScanType.BY_LUCENE_AUTO_COMPLETE || type == IndexScanType.BY_LUCENE_SPELLCHECK;
    if (!suggestionScan) {
        // All other scan types use the standard entry-to-record pipeline.
        return super.executePlan(store, context, continuation, executeProperties);
    }
    final RecordType onlyRecordType = typesForIndex.iterator().next();
    final RecordCursor<IndexEntry> entryCursor = executeEntries(store, context, continuation, executeProperties);
    return entryCursor
            .map(QueryPlanUtils.getCoveringIndexEntryToPartialRecordFunction(store, onlyRecordType.getName(), indexName, getToPartialRecord(luceneIndex, onlyRecordType, type), type))
            .map(QueryResult::of);
}
Use of com.apple.foundationdb.record.IndexEntry in the fdb-record-layer project by FoundationDB.
From the class LuceneRecordCursor, method onNext:
/**
 * Advance the cursor to the next {@link IndexEntry} produced by the Lucene scan.
 * <p>
 * The cursor pages through Lucene results via {@code performScan()}, reconstructing
 * each record's index key tuple from the stored primary-key field and the indexed
 * field values. Once a terminal result has been produced, it is returned unchanged
 * on every subsequent call so the {@link NoNextReason} stays stable.
 *
 * @return a future completing with the next cursor result, or a terminal result when
 *         the scan is exhausted or a limit was reached
 */
@Nonnull
@Override
public CompletableFuture<RecordCursorResult<IndexEntry>> onNext() {
    if (nextResult != null && !nextResult.hasNext()) {
        // hasNext is false to avoid the NoNextReason changing.
        return CompletableFuture.completedFuture(nextResult);
    }
    if (topDocs == null) {
        // First call: run the initial Lucene search.
        try {
            performScan();
        } catch (IOException ioException) {
            // Wrap in the project's standard exception type rather than a bare RuntimeException.
            throw new RecordCoreException("Failed to scan the lucene index", ioException);
        }
    }
    if (topDocs.scoreDocs.length - 1 < currentPosition && limitRemaining > 0 && !exhausted) {
        // The current page is consumed but Lucene may have more results: fetch the next page.
        try {
            performScan();
        } catch (IOException ioException) {
            throw new RecordCoreException("Failed to scan the lucene index", ioException);
        }
        // Positions are relative to the freshly fetched page, so rebase after paging forward.
        currentPosition = Math.max(currentPosition - MAX_PAGE_SIZE, 0);
    }
    if (limitRemaining > 0 && currentPosition < topDocs.scoreDocs.length && limitManager.tryRecordScan()) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                Document document = searcher.doc(topDocs.scoreDocs[currentPosition].doc);
                searchAfter = topDocs.scoreDocs[currentPosition];
                IndexableField primaryKey = document.getField(LuceneIndexMaintainer.PRIMARY_KEY_FIELD_NAME);
                BytesRef pk = primaryKey.binaryValue();
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("document={}", document);
                    LOGGER.trace("primary key read={}", Tuple.fromBytes(pk.bytes, pk.offset, pk.length));
                }
                if (timer != null) {
                    timer.increment(FDBStoreTimer.Counts.LOAD_SCAN_ENTRY);
                }
                if (limitRemaining != Integer.MAX_VALUE) {
                    limitRemaining--;
                }
                // Honor the BytesRef slice: the backing array may be shared, so the bytes of this
                // primary key live at [offset, offset + length), not necessarily at the array start.
                List<Object> setPrimaryKey = Tuple.fromBytes(pk.bytes, pk.offset, pk.length).getItems();
                List<Object> fieldValues = Lists.newArrayList(fields);
                int[] keyPos = state.index.getPrimaryKeyComponentPositions();
                Tuple tuple;
                if (keyPos != null) {
                    // Primary-key components that overlap index key positions are spliced back in
                    // place; any remaining components are appended at the end.
                    List<Object> leftovers = Lists.newArrayList();
                    for (int i = 0; i < keyPos.length; i++) {
                        if (keyPos[i] > -1) {
                            fieldValues.set(keyPos[i], setPrimaryKey.get(i));
                        } else {
                            leftovers.add(setPrimaryKey.get(i));
                        }
                    }
                    tuple = Tuple.fromList(fieldValues).addAll(leftovers);
                } else {
                    tuple = Tuple.fromList(fieldValues).addAll(setPrimaryKey);
                }
                nextResult = RecordCursorResult.withNextValue(new IndexEntry(state.index, tuple, null), continuationHelper());
                currentPosition++;
                return nextResult;
            } catch (Exception e) {
                // Preserve the cause on the exception chain; attach the position as log info.
                throw new RecordCoreException("Failed to get document", e).addLogInfo("currentPosition", currentPosition);
            }
        }, executor);
    } else {
        // a limit was exceeded
        if (limitRemaining <= 0) {
            nextResult = RecordCursorResult.withoutNextValue(continuationHelper(), NoNextReason.RETURN_LIMIT_REACHED);
        } else if (currentPosition >= topDocs.scoreDocs.length) {
            nextResult = RecordCursorResult.withoutNextValue(continuationHelper(), NoNextReason.SOURCE_EXHAUSTED);
        } else {
            final Optional<NoNextReason> stoppedReason = limitManager.getStoppedReason();
            if (!stoppedReason.isPresent()) {
                throw new RecordCoreException("limit manager stopped LuceneRecordCursor but did not report a reason");
            } else {
                nextResult = RecordCursorResult.withoutNextValue(continuationHelper(), stoppedReason.get());
            }
        }
        return CompletableFuture.completedFuture(nextResult);
    }
}
Use of com.apple.foundationdb.record.IndexEntry in the fdb-record-layer project by FoundationDB.
From the class LuceneAutoCompleteResultCursor, method onNext:
/**
 * Advance to the next auto-complete suggestion, exposed as an {@link IndexEntry}.
 * <p>
 * The lookup itself runs lazily on the cursor's executor: the first call populates
 * {@code lookupResults} via {@code performLookup()}, and each subsequent call serves
 * the suggestion at {@code currentPosition}. A {@code null} lookup result means the
 * suggestions are exhausted.
 */
@SuppressWarnings("cast")
@Nonnull
@Override
public CompletableFuture<RecordCursorResult<IndexEntry>> onNext() {
    CompletableFuture<Lookup.LookupResult> lookupResult = CompletableFuture.supplyAsync(() -> {
        // Run the suggester lookup once, off the caller's thread; cache the results.
        if (lookupResults == null) {
            try {
                performLookup();
            } catch (IOException ioException) {
                throw new RecordCoreException("Exception to lookup the auto complete suggestions", ioException).addLogInfo(LogMessageKeys.QUERY, query);
            }
        }
        // null signals there are no more suggestions to return.
        return currentPosition < lookupResults.size() ? lookupResults.get(currentPosition) : null;
    }, executor);
    return lookupResult.thenApply(r -> {
        if (r == null) {
            return RecordCursorResult.exhausted();
        } else {
            // When highlighting is requested, the suggester supplies a pre-highlighted key.
            final String suggestion = highlight ? (String) r.highlightKey : (String) r.key;
            if (r.payload == null) {
                throw new RecordCoreException("Empty payload of lookup result for lucene auto complete suggestion").addLogInfo(LogMessageKeys.QUERY, query).addLogInfo(LogMessageKeys.RESULT, suggestion);
            }
            // The payload carries the entry's key prefix; the suggestion text is appended to it,
            // and the suggester's weight becomes the entry's value.
            IndexEntry indexEntry = new IndexEntry(state.index, Tuple.fromBytes(r.payload.bytes).add(suggestion), Tuple.from(r.value));
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Suggestion read as an index entry={}", indexEntry);
            }
            // currentPosition is advanced here (post-increment) so the continuation reflects
            // the suggestion just returned.
            return RecordCursorResult.withNextValue(indexEntry, continuationHelper(lookupResults.get(currentPosition++)));
        }
    });
}
Use of com.apple.foundationdb.record.IndexEntry in the fdb-record-layer project by FoundationDB.
From the class FDBRecordStore, method scanUniquenessViolations:
/**
 * Scan the uniqueness-violation entries recorded for the given index, converting each
 * raw index entry into a {@link RecordIndexUniquenessViolation}: the entry's key splits
 * into the index value key and the violating record's primary key, and the entry's
 * value holds the already-existing key it collides with.
 */
@Override
@Nonnull
public RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nonnull TupleRange range, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
    // The column count is a property of the index, so compute it once up front.
    final int indexColumns = index.getColumnSize();
    final RecordCursor<IndexEntry> violationEntries = getIndexMaintainer(index).scanUniquenessViolations(range, continuation, scanProperties);
    return violationEntries.map(violationEntry -> {
        final Tuple entryKey = violationEntry.getKey();
        final Tuple valueKey = TupleHelpers.subTuple(entryKey, 0, indexColumns);
        final Tuple primaryKey = TupleHelpers.subTuple(entryKey, indexColumns, entryKey.size());
        final Tuple existingKey = violationEntry.getValue();
        return new RecordIndexUniquenessViolation(index, new IndexEntry(index, valueKey, existingKey), primaryKey, existingKey);
    });
}
Use of com.apple.foundationdb.record.IndexEntry in the fdb-record-layer project by FoundationDB.
From the class TextScan, method scan:
/**
 * Scan the text index for entries matching {@code textComparison} against the given token
 * list, dispatching on the comparison type to the appropriate cursor combination:
 * single-token and prefix scans go straight to the index, while multi-token comparisons
 * are built from intersections, unions, or position-list filters over per-token scans.
 * <p>
 * Skip and returned-row limits are stripped from child scans and re-applied to the
 * combined cursor so that combining cursors does not under- or over-count rows.
 *
 * @param store the record store backing the scan
 * @param context evaluation context (used to re-tokenize with stop words for phrase search)
 * @param prefix grouping key prefix the scan is constrained to, if any
 * @param suffix range restriction on the entries following the token, if any
 * @param index the text index to scan
 * @param tokenList the tokens extracted from the comparison's comparand
 * @param continuation continuation from a previous scan, if any
 * @param scanProperties limits, skip, and direction for the scan
 * @return a cursor over matching index entries
 */
@Nonnull
// try-with-resources - the two cursors returned cannot be closed because they are wrapped and returned
@SuppressWarnings("squid:S2095")
private <M extends Message> RecordCursor<IndexEntry> scan(@Nonnull FDBRecordStoreBase<M> store, @Nonnull EvaluationContext context, @Nullable Tuple prefix, @Nullable TupleRange suffix, @Nonnull Index index, @Nonnull List<String> tokenList, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
    if (tokenList.isEmpty()) {
        return RecordCursor.empty();
    }
    // Number of key entries before the position list: the token itself plus any grouping prefix.
    final int prefixEntries = 1 + (prefix != null ? prefix.size() : 0);
    final Comparisons.Type comparisonType = textComparison.getType();
    if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_PREFIX) || (tokenList.size() == 1 && (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_PREFIXES) || comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY_PREFIX)))) {
        // A single prefix token scans a range of the index directly.
        if (tokenList.size() != 1) {
            throw new RecordCoreException("text prefix comparison included " + tokenList.size() + " comparands instead of one");
        }
        return scanTokenPrefix(store, tokenList.get(0), prefix, suffix, index, scanProperties).apply(continuation);
    } else if (tokenList.size() == 1) {
        // A single exact token needs no cursor combination; a plain index scan
        // is necessary, not just nice to have.
        return scanToken(store, tokenList.get(0), prefix, suffix, index, scanProperties).apply(continuation);
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL)) {
        // Take the intersection of all children. Note that to handle skip and the returned row limit correctly,
        // the skip and limit are both removed and then applied later.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream().map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties)).collect(Collectors.toList());
        return IntersectionCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(), intersectionChildren, continuation, store.getTimer()).skip(scanProperties.getExecuteProperties().getSkip()).limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_PREFIXES)) {
        // Probabilistic intersection over per-prefix scans; the comparison supplies the
        // expected record count and tolerable false-positive rate for its Bloom filters.
        final Comparisons.TextContainsAllPrefixesComparison allPrefixesComparison = (Comparisons.TextContainsAllPrefixesComparison) textComparison;
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream().map(token -> scanTokenPrefix(store, token, prefix, suffix, index, childScanProperties)).collect(Collectors.toList());
        return ProbableIntersectionCursor.create(suffixComparisonKeyFunction(prefixEntries), intersectionChildren, allPrefixesComparison.getExpectedRecords(), allPrefixesComparison.getFalsePositivePercentage(), continuation, store.getTimer()).skip(scanProperties.getExecuteProperties().getSkip()).limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY)) {
        // Take the union of all children. Note that to handle skip and the returned row limit correctly,
        // the skip is removed from the children and applied to the returned cursor. Also, the limit
        // is adjusted upwards and then must be applied again to returned union.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> unionChildren = tokenList.stream().map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties)).collect(Collectors.toList());
        return UnionCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(), unionChildren, continuation, store.getTimer()).skip(scanProperties.getExecuteProperties().getSkip()).limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ANY_PREFIX)) {
        // Unordered union: prefix scans of different tokens need not return entries in a
        // mutually comparable order, so no comparison key function is used.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndAdjustLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> unionChildren = tokenList.stream().map(token -> scanTokenPrefix(store, token, prefix, suffix, index, childScanProperties)).collect(Collectors.toList());
        return UnorderedUnionCursor.create(unionChildren, continuation, store.getTimer()).skip(scanProperties.getExecuteProperties().getSkip()).limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else {
        // Apply the filter based on the position lists
        final Function<List<IndexEntry>, Boolean> predicate;
        if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_ALL_WITHIN) && textComparison instanceof Comparisons.TextWithMaxDistanceComparison) {
            int maxDistance = ((Comparisons.TextWithMaxDistanceComparison) textComparison).getMaxDistance();
            predicate = entries -> entriesContainAllWithin(entries, maxDistance);
        } else if (comparisonType.equals(Comparisons.Type.TEXT_CONTAINS_PHRASE)) {
            // Phrase matching needs the original token order including stop words,
            // so re-tokenize without filtering them out.
            List<String> tokensWithStopWords = getTokenList(store, context, false);
            predicate = entries -> entriesContainPhrase(entries, tokensWithStopWords);
        } else {
            throw new RecordCoreException("unsupported comparison type for text query: " + comparisonType);
        }
        // It's either TEXT_CONTAINS_ALL_WITHIN_DISTANCE or TEXT_CONTAINS_PHRASE. In any case, we need to scan
        // all tokens, intersect, and then apply a filter on the returned list.
        final ScanProperties childScanProperties = scanProperties.with(ExecuteProperties::clearSkipAndLimit);
        List<Function<byte[], RecordCursor<IndexEntry>>> intersectionChildren = tokenList.stream().map(token -> scanToken(store, token, prefix, suffix, index, childScanProperties)).collect(Collectors.toList());
        final RecordCursor<List<IndexEntry>> intersectionCursor = IntersectionMultiCursor.create(suffixComparisonKeyFunction(prefixEntries), scanProperties.isReverse(), intersectionChildren, continuation, store.getTimer());
        return intersectionCursor.filterInstrumented(predicate, store.getTimer(), inCounts, duringEvents, successCounts, failureCounts).map(indexEntries -> indexEntries.get(0)).skip(scanProperties.getExecuteProperties().getSkip()).limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    }
}
Aggregations