Example usage of com.apple.foundationdb.record.metadata.RecordType in the project fdb-record-layer by FoundationDB: the class LuceneIndexTest, method searchForAutoCompleteWithTextSizeLimit.
/**
 * Exercises the Lucene auto-complete scan with {@code LUCENE_AUTO_COMPLETE_TEXT_SIZE_UPPER_LIMIT}
 * set to {@code limit}, scanning for the prefix "software engineer".
 *
 * @param limit value for the auto-complete text size upper limit property
 * @param matches {@code true} if a suggestion is expected under this limit, {@code false} if the
 *        scan should produce no entries
 * @throws Exception if the record store scan fails
 */
void searchForAutoCompleteWithTextSizeLimit(int limit, boolean matches) throws Exception {
try (FDBRecordContext context = openContext(RecordLayerPropertyStorage.newBuilder().addProp(LuceneRecordContextProperties.LUCENE_AUTO_COMPLETE_TEXT_SIZE_UPPER_LIMIT, limit))) {
final RecordType recordType = addIndexAndSaveRecordForAutoComplete(context, false);
List<IndexEntry> results = recordStore.scanIndex(SIMPLE_TEXT_WITH_AUTO_COMPLETE, IndexScanType.BY_LUCENE_AUTO_COMPLETE, TupleRange.allOf(Tuple.from("software engineer")), null, ScanProperties.FORWARD_SCAN).asList().get();
if (!matches) {
// Assert no suggestions
assertTrue(results.isEmpty());
return;
}
// Assert the count of suggestions
assertEquals(1, results.size());
// Assert the suggestions' keys: the suggestion text is the last element of each entry's key
List<String> suggestions = results.stream().map(i -> (String) i.getKey().get(i.getKeySize() - 1)).collect(Collectors.toList());
assertEquals(ImmutableList.of(ENGINEER_JOKE), suggestions);
// Assert the corresponding field for the suggestions: the field name precedes the suggestion text in the key
List<String> fields = results.stream().map(i -> (String) i.getKey().get(i.getKeySize() - 2)).collect(Collectors.toList());
assertEquals(ImmutableList.of("text"), fields);
// Iterate directly (no need for stream().forEach on a List) and check each partial record
results.forEach(i -> assertDocumentPartialRecordFromIndexEntry(recordType, i, (String) i.getKey().get(i.getKeySize() - 1), (String) i.getKey().get(i.getKeySize() - 2), IndexScanType.BY_LUCENE_AUTO_COMPLETE));
assertEquals(1, context.getTimer().getCounter(FDBStoreTimer.Counts.LUCENE_SCAN_MATCHED_AUTO_COMPLETE_SUGGESTIONS).getCount());
assertEntriesAndSegmentInfoStoredInCompoundFile(AutoCompleteSuggesterCommitCheckAsync.getSuggestionIndexSubspace(recordStore.indexSubspace(SIMPLE_TEXT_WITH_AUTO_COMPLETE), TupleHelpers.EMPTY), context, "_0.cfs", true);
}
}
Example usage of com.apple.foundationdb.record.metadata.RecordType in the project fdb-record-layer by FoundationDB: the class LuceneIndexQueryPlan, method executePlan.
/**
 * Override here to have specific logic to build the {@link QueryResult} for lucene auto complete
 * suggestion result. Auto-complete and spell-check scans map raw index entries to covering partial
 * records; every other scan type is delegated to the parent implementation.
 */
@Nonnull
@Override
public <M extends Message> RecordCursor<QueryResult> executePlan(@Nonnull FDBRecordStoreBase<M> store, @Nonnull EvaluationContext context, @Nullable byte[] continuation, @Nonnull ExecuteProperties executeProperties) {
final RecordMetaData recordMetaData = store.getRecordMetaData();
final Index luceneIndex = recordMetaData.getIndex(indexName);
final Collection<RecordType> typesForIndex = recordMetaData.recordTypesForIndex(luceneIndex);
// A Lucene index is only well-defined over exactly one record type
if (typesForIndex.size() != 1) {
throw new RecordCoreException("No lucene index should span multiple record types");
}
final IndexScanType type = getScanType();
if (type != IndexScanType.BY_LUCENE_AUTO_COMPLETE && type != IndexScanType.BY_LUCENE_SPELLCHECK) {
return super.executePlan(store, context, continuation, executeProperties);
}
final RecordType singleType = typesForIndex.iterator().next();
// Map each index entry to a covering partial record, then wrap each as a QueryResult
return executeEntries(store, context, continuation, executeProperties)
.map(QueryPlanUtils.getCoveringIndexEntryToPartialRecordFunction(store, singleType.getName(), indexName, getToPartialRecord(luceneIndex, singleType, type), type))
.map(QueryResult::of);
}
Example usage of com.apple.foundationdb.record.metadata.RecordType in the project fdb-record-layer by FoundationDB: the class LuceneIndexValidator, method validate.
@Override
public void validate(@Nonnull MetaDataValidator metaDataValidator) {
super.validate(metaDataValidator);
validateNotVersion();
// Each record type this index spans must have a descriptor compatible with the index's root expression
metaDataValidator.getRecordMetaData().recordTypesForIndex(index)
.forEach(type -> LuceneIndexExpressions.validate(index.getRootExpression(), type.getDescriptor()));
}
Example usage of com.apple.foundationdb.record.metadata.RecordType in the project fdb-record-layer by FoundationDB: the class RecordMetaDataBuilder, method build.
/**
 * Build and validate meta-data with specific index registry.
 * @param validate {@code true} to validate the new meta-data
 * @return new meta-data
 */
@Nonnull
public RecordMetaData build(boolean validate) {
// The type maps are created empty here and populated below, AFTER the RecordMetaData is
// constructed: the meta-data instance is passed to each RecordTypeBuilder.build call so the
// built types can hold a back-reference to it. Do not reorder this construction.
Map<String, RecordType> builtRecordTypes = Maps.newHashMapWithExpectedSize(recordTypes.size());
Map<String, SyntheticRecordType<?>> builtSyntheticRecordTypes = Maps.newHashMapWithExpectedSize(syntheticRecordTypes.size());
RecordMetaData metaData = new RecordMetaData(recordsDescriptor, getUnionDescriptor(), unionFields, builtRecordTypes, builtSyntheticRecordTypes, indexes, universalIndexes, formerIndexes, splitLongRecords, storeRecordVersions, version, subspaceKeyCounter, usesSubspaceKeyCounter, recordCountKey, localFileDescriptor != null);
for (RecordTypeBuilder recordTypeBuilder : recordTypes.values()) {
KeyExpression primaryKey = recordTypeBuilder.getPrimaryKey();
if (primaryKey != null) {
builtRecordTypes.put(recordTypeBuilder.getName(), recordTypeBuilder.build(metaData));
// Record where each index's root expression overlaps the primary key so those
// components need not be stored redundantly in index entries
for (Index index : recordTypeBuilder.getIndexes()) {
index.setPrimaryKeyComponentPositions(buildPrimaryKeyComponentPositions(index.getRootExpression(), primaryKey));
}
} else {
// Every stored record type requires a primary key; fail fast with the offending type's name
throw new MetaDataException("Record type " + recordTypeBuilder.getName() + " must have a primary key");
}
}
if (!syntheticRecordTypes.isEmpty()) {
// Synthetic record types have no protobuf messages of their own in the user's file, so a
// descriptor file named "_synthetic" is generated for them, depending on the union's file
DescriptorProtos.FileDescriptorProto.Builder fileBuilder = DescriptorProtos.FileDescriptorProto.newBuilder();
fileBuilder.setName("_synthetic");
fileBuilder.addDependency(unionDescriptor.getFile().getName());
syntheticRecordTypes.values().forEach(recordTypeBuilder -> recordTypeBuilder.buildDescriptor(fileBuilder));
final Descriptors.FileDescriptor fileDescriptor;
try {
final Descriptors.FileDescriptor[] dependencies = new Descriptors.FileDescriptor[] { unionDescriptor.getFile() };
fileDescriptor = Descriptors.FileDescriptor.buildFrom(fileBuilder.build(), dependencies);
} catch (Descriptors.DescriptorValidationException ex) {
throw new MetaDataException("Could not build synthesized file descriptor", ex);
}
for (SyntheticRecordTypeBuilder<?> recordTypeBuilder : syntheticRecordTypes.values()) {
builtSyntheticRecordTypes.put(recordTypeBuilder.getName(), recordTypeBuilder.build(metaData, fileDescriptor));
}
}
if (validate) {
// Validation runs against the fully-populated meta-data, using the configured index maintainer registry
final MetaDataValidator validator = new MetaDataValidator(metaData, indexMaintainerRegistry);
validator.validate();
}
return metaData;
}
Example usage of com.apple.foundationdb.record.metadata.RecordType in the project fdb-record-layer by FoundationDB: the class FDBRecordStore, method repairRecordKeyIfNecessary.
/**
 * Inspects a single stored key/value pair and, if its key is missing the {@code UNSPLIT_RECORD}
 * split suffix, rewrites it to include one; otherwise counts anomalies (invalid suffix or
 * invalid key length) without modifying data.
 * NOTE(review): presumably this repairs records written before unsplit-suffix formatting was
 * in effect — confirm against the store-format migration history.
 *
 * @param context transaction context used for counters and (when not a dry run) the rewrite
 * @param recordSubspace subspace that {@code keyValue}'s key was read from
 * @param keyValue the raw key/value pair under inspection
 * @param isDryRun when {@code true}, only counters/logging occur; no keys are written or cleared
 */
private void repairRecordKeyIfNecessary(@Nonnull FDBRecordContext context, @Nonnull Subspace recordSubspace, @Nonnull KeyValue keyValue, final boolean isDryRun) {
final RecordMetaData metaData = metaDataProvider.getRecordMetaData();
final Tuple recordKey = recordSubspace.unpack(keyValue.getKey());
// Ignore version key
if (metaData.isStoreRecordVersions() && isMaybeVersion(recordKey)) {
return;
}
// Deserialize to discover the record type, whose primary key expression determines the expected key length
final Message protoRecord = serializer.deserialize(metaData, recordKey, keyValue.getValue(), getTimer());
final RecordType recordType = metaData.getRecordTypeForDescriptor(protoRecord.getDescriptorForType());
final KeyExpression primaryKeyExpression = recordType.getPrimaryKey();
if (recordKey.size() == primaryKeyExpression.getColumnSize()) {
// Key is exactly the primary key with no split suffix: repair by appending UNSPLIT_RECORD
context.increment(FDBStoreTimer.Counts.REPAIR_RECORD_KEY);
final Tuple newPrimaryKey = recordKey.add(SplitHelper.UNSPLIT_RECORD);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(KeyValueLogMessage.of("Repairing primary key", LogMessageKeys.RECORD_TYPE, recordType.getName(), subspaceProvider.logKey(), subspaceProvider.toString(context), "dry_run", isDryRun, "orig_primary_key", recordKey, "new_primary_key", newPrimaryKey));
}
if (!isDryRun) {
// Move the value to the suffixed key; clear must precede set in case of overlap elsewhere
final Transaction tr = context.ensureActive();
tr.clear(keyValue.getKey());
tr.set(recordSubspace.pack(newPrimaryKey), keyValue.getValue());
}
} else if (recordKey.size() == primaryKeyExpression.getColumnSize() + 1) {
// Key already carries one extra element; it is valid only if that element is UNSPLIT_RECORD
Object suffix = recordKey.get(recordKey.size() - 1);
if (!(suffix instanceof Long) || !(((Long) suffix) == SplitHelper.UNSPLIT_RECORD)) {
// Anomaly is counted and logged but never repaired, even outside dry-run mode
context.increment(FDBStoreTimer.Counts.INVALID_SPLIT_SUFFIX);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(KeyValueLogMessage.of("Invalid split suffix", subspaceProvider.logKey(), subspaceProvider.toString(context), LogMessageKeys.RECORD_TYPE, recordType.getName(), LogMessageKeys.PRIMARY_KEY, recordKey));
}
}
} else {
// Key length matches neither the bare primary key nor primary key + suffix; count and log only
context.increment(FDBStoreTimer.Counts.INVALID_KEY_LENGTH);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(KeyValueLogMessage.of("Invalid key length", subspaceProvider.logKey(), subspaceProvider.toString(context), LogMessageKeys.RECORD_TYPE, recordType.getName(), LogMessageKeys.PRIMARY_KEY, recordKey));
}
}
}
Aggregations