Use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class FDBCoveringIndexQueryTest, method coveringConcatenatedFields:
/**
 * Verify that, given a concatenated required-results field, a covering index is returned.
*/
@DualPlannerTest
void coveringConcatenatedFields() throws Exception {
    RecordMetaDataHook hook = metaData ->
            metaData.addIndex("MySimpleRecord", "MySimpleRecord$2+3",
                    concatenateFields("num_value_2", "num_value_3_indexed"));
    complexQuerySetup(hook);
    RecordQuery query = RecordQuery.newBuilder()
            .setRecordType("MySimpleRecord")
            .setFilter(Query.and(
                    Query.field("num_value_2").greaterThan(0),
                    Query.field("num_value_2").lessThan(10)))
            .setRequiredResults(Collections.singletonList(concatenateFields("num_value_2", "num_value_3_indexed")))
            .build();
    // Covering(Index(MySimpleRecord$2+3 ([0],[10])) -> [num_value_2: KEY[0], num_value_3_indexed: KEY[1], rec_no: KEY[2]])
    RecordQueryPlan plan = planner.plan(query);
    final BindingMatcher<? extends RecordQueryPlan> planMatcher =
            coveringIndexPlan().where(indexPlanOf(indexPlan()
                    .where(indexName("MySimpleRecord$2+3"))
                    .and(scanComparisons(range("([0],[10])")))));
    assertMatchesExactly(plan, planMatcher);
    assertEquals(1722836804, plan.planHash(PlanHashable.PlanHashKind.LEGACY));
    assertEquals(-992322107, plan.planHash(PlanHashable.PlanHashKind.FOR_CONTINUATION));
    assertEquals(2083564653, plan.planHash(PlanHashable.PlanHashKind.STRUCTURAL_WITHOUT_LITERALS));
    try (FDBRecordContext context = openContext()) {
        openSimpleRecordStore(context, hook);
        try (RecordCursorIterator<FDBQueriedRecord<Message>> cursor = recordStore.executeQuery(plan).asIterator()) {
            while (cursor.hasNext()) {
                FDBQueriedRecord<Message> rec = cursor.next();
                TestRecords1Proto.MySimpleRecord.Builder myrec = TestRecords1Proto.MySimpleRecord.newBuilder();
                myrec.mergeFrom(Objects.requireNonNull(rec).getRecord());
                assertThat(myrec.getNumValue2(), greaterThan(0));
                assertThat(myrec.getNumValue2(), lessThan(10));
                assertThat(myrec.hasNumValue3Indexed(), is(true));
            }
        }
        commit(context);
        assertDiscardedNone(context);
    }
}
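For reference, the concatenateFields helper used above is the static Key.Expressions.concatenateFields. A more explicit spelling of the same index definition against the Key API (a sketch, not taken from the test; metaData is the RecordMetaDataBuilder passed to the hook) would be:

    KeyExpression indexKey = Key.Expressions.concat(
            Key.Expressions.field("num_value_2"),
            Key.Expressions.field("num_value_3_indexed"));
    metaData.addIndex("MySimpleRecord", new Index("MySimpleRecord$2+3", indexKey));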
Use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class FDBCoveringIndexQueryTest, method notCoveringWithRequiredFieldsNotAvailable:
/**
 * Verify that covering indexes are not used when an outer "header" field is missing from the primary key,
* even though the index has all of the fields that the query actually asks for.
*/
@DualPlannerTest
void notCoveringWithRequiredFieldsNotAvailable() throws Exception {
    RecordMetaDataHook hook = metaData -> {
        metaData.getRecordType("MyRecord").setPrimaryKey(field("header").nest(field("rec_no")));
        metaData.addIndex("MyRecord", "str_value");
    };
    try (FDBRecordContext context = openContext()) {
        openRecordWithHeader(context, hook);
    }
    RecordQuery query = RecordQuery.newBuilder()
            .setRecordType("MyRecord")
            .setFilter(Query.field("str_value").equalsValue("lion"))
            .setRequiredResults(Collections.singletonList(field("header").nest("rec_no")))
            .build();
    // Index(MyRecord$str_value [[lion],[lion]])
    RecordQueryPlan plan = planner.plan(query);
    final BindingMatcher<? extends RecordQueryPlan> planMatcher =
            indexPlan()
                    .where(indexName("MyRecord$str_value"))
                    .and(scanComparisons(range("[[lion],[lion]]")));
    assertMatchesExactly(plan, planMatcher);
    assertEquals(-629018945, plan.planHash(PlanHashable.PlanHashKind.LEGACY));
    assertEquals(2065541259, plan.planHash(PlanHashable.PlanHashKind.FOR_CONTINUATION));
    assertEquals(-2063034193, plan.planHash(PlanHashable.PlanHashKind.STRUCTURAL_WITHOUT_LITERALS));
}
Use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class FDBInQueryTest, method testInQueryOrOverlap:
/**
* Verify that IN queries can be planned using index scans, then used in a UNION to implement an OR with IN whose
* elements overlap, and that the union with that comparison key deduplicates the records in the overlap.
*/
@DualPlannerTest
void testInQueryOrOverlap() throws Exception {
    complexQuerySetup(NO_HOOK);
    RecordQuery query = RecordQuery.newBuilder()
            .setRecordType("MySimpleRecord")
            .setFilter(Query.or(
                    Query.field("num_value_unique").in(Arrays.asList(903, 905, 901)),
                    Query.field("num_value_unique").in(Arrays.asList(906, 905, 904))))
            .build();
    RecordQueryPlan plan = planner.plan(query);
    if (planner instanceof RecordQueryPlanner) {
        // Index(MySimpleRecord$num_value_unique [EQUALS $__in_num_value_unique__0]) WHERE __in_num_value_unique__0 IN [901, 903, 905] SORTED ∪[Field { 'num_value_unique' None}, Field { 'rec_no' None}] Index(MySimpleRecord$num_value_unique [EQUALS $__in_num_value_unique__0]) WHERE __in_num_value_unique__0 IN [904, 905, 906] SORTED
        // Ordinary equality comparisons would be ordered just by the primary key so that would be the union comparison key.
        // Must compare the IN field here; they are ordered, but not trivially (same value for each).
        assertMatchesExactly(plan, unionPlan(
                inValuesJoinPlan(indexPlan().where(indexName("MySimpleRecord$num_value_unique")).and(scanComparisons(range("[EQUALS $__in_num_value_unique__0]"))))
                        .where(inValuesList(equalsObject(Arrays.asList(901, 903, 905)))),
                inValuesJoinPlan(indexPlan().where(indexName("MySimpleRecord$num_value_unique")).and(scanComparisons(range("[EQUALS $__in_num_value_unique__0]"))))
                        .where(inValuesList(equalsObject(Arrays.asList(904, 905, 906))))));
        assertEquals(218263868, plan.planHash(PlanHashable.PlanHashKind.LEGACY));
        assertEquals(468995802, plan.planHash(PlanHashable.PlanHashKind.FOR_CONTINUATION));
        assertEquals(2098251608, plan.planHash(PlanHashable.PlanHashKind.STRUCTURAL_WITHOUT_LITERALS));
    } else {
        assertMatchesExactly(plan, fetchFromPartialRecordPlan(unionPlan(
                inValuesJoinPlan(coveringIndexPlan().where(indexPlanOf(indexPlan()
                        .where(indexName("MySimpleRecord$num_value_unique")).and(scanComparisons(equalities(exactly(anyParameterComparison())))))))
                        .where(inValuesList(equalsObject(Arrays.asList(901, 903, 905)))),
                inValuesJoinPlan(coveringIndexPlan().where(indexPlanOf(indexPlan()
                        .where(indexName("MySimpleRecord$num_value_unique")).and(scanComparisons(equalities(exactly(anyParameterComparison())))))))
                        .where(inValuesList(equalsObject(Arrays.asList(904, 905, 906)))))
                .where(comparisonKey(concat(field("num_value_unique"), primaryKey("MySimpleRecord"))))));
        assertEquals(-1323754895, plan.planHash(PlanHashable.PlanHashKind.LEGACY));
        assertEquals(856768529, plan.planHash(PlanHashable.PlanHashKind.FOR_CONTINUATION));
        assertEquals(-1700358353, plan.planHash(PlanHashable.PlanHashKind.STRUCTURAL_WITHOUT_LITERALS));
    }
    Set<Long> dupes = new HashSet<>();
    assertEquals(5, querySimpleRecordStore(NO_HOOK, plan, EvaluationContext::empty, record -> {
        assertTrue(dupes.add(record.getRecNo()), "should not have duplicated records");
        assertThat(record.getNumValueUnique(), anyOf(is(901), is(903), is(904), is(905), is(906)));
    }, context -> TestHelpers.assertDiscardedAtMost(1, context)));
}
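The comparison key asserted in the Cascades branch above, concat(field("num_value_unique"), primaryKey("MySimpleRecord")), orders both union legs by the IN field first and then by the primary key (rec_no), which is what lets the union deduplicate the overlapping value 905. Written directly with the Key.Expressions helpers (a sketch, not code from the test), that ordering key is:

    KeyExpression unionComparisonKey = Key.Expressions.concat(
            Key.Expressions.field("num_value_unique"),
            Key.Expressions.field("rec_no"));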
Use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class FDBNestedFieldQueryTest, method testNestedPrimaryKeyQuery:
/**
 * Verify that record scans with nested primary keys work properly.
* Specifically, verify that a filter is implemented as a record scan in the case where there is a two-field
* primary key both of whose fields are nested in some header subrecord.
*/
@DualPlannerTest
public void testNestedPrimaryKeyQuery() throws Exception {
    final RecordMetaDataHook hook = metaData -> {
        metaData.getRecordType("MyRecord").setPrimaryKey(
                concat(field("header").nest(field("path")), field("header").nest(field("rec_no"))));
    };
    try (FDBRecordContext context = openContext()) {
        openRecordWithHeader(context, hook);
        saveHeaderRecord(1, "a", 0, "able");
        saveHeaderRecord(2, "a", 3, "baker");
        commit(context);
    }
    RecordQuery query = RecordQuery.newBuilder()
            .setRecordType("MyRecord")
            .setFilter(Query.and(
                    Query.field("header").matches(Query.field("path").equalsValue("a")),
                    Query.field("header").matches(Query.field("rec_no").equalsValue(2L))))
            .build();
    // Scan([[a, 2],[a, 2]])
    RecordQueryPlan plan = planner.plan(query);
    assertThat(plan, scan(bounds(hasTupleString("[[a, 2],[a, 2]]"))));
    assertEquals(1265534819, plan.planHash(PlanHashable.PlanHashKind.LEGACY));
    assertEquals(136710600, plan.planHash(PlanHashable.PlanHashKind.FOR_CONTINUATION));
    assertEquals(-1817343447, plan.planHash(PlanHashable.PlanHashKind.STRUCTURAL_WITHOUT_LITERALS));
    try (FDBRecordContext context = openContext()) {
        openRecordWithHeader(context, hook);
        try (RecordCursor<FDBQueriedRecord<Message>> cursor = recordStore.executeQuery(plan)) {
            RecordCursorResult<FDBQueriedRecord<Message>> result = cursor.getNext();
            assertTrue(result.hasNext());
            TestRecordsWithHeaderProto.MyRecord record = parseMyRecord(result.get().getRecord());
            assertEquals("baker", record.getStrValue());
            assertFalse(cursor.getNext().hasNext());
        }
        TestHelpers.assertDiscardedNone(context);
    }
}
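The concat, field, and nest calls in the hook are the usual static imports from Key.Expressions. An equivalent way to write the same two-column primary key, nesting a concatenation under the header field (a sketch; equivalent assuming header is a singular message field), is:

    KeyExpression primaryKey = Key.Expressions.field("header").nest(
            Key.Expressions.concat(
                    Key.Expressions.field("path"),
                    Key.Expressions.field("rec_no")));
    metaData.getRecordType("MyRecord").setPrimaryKey(primaryKey);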
Use of com.apple.foundationdb.record.metadata.Key in project fdb-record-layer by FoundationDB.
From the class LuceneDocumentFromRecord, method getGroupedFields:
// Grouping keys are evaluated more or less normally, turning into multiple groups.
// Each group corresponds to a single document in a separate index / directory.
// Within that document, the grouped fields are merged.
protected static <M extends Message> void getGroupedFields(@Nonnull List<KeyExpression> keys, int keyIndex, int keyPosition,
                                                            int groupingCount, @Nonnull Tuple groupPrefix,
                                                            @Nonnull FDBRecord<M> rec, @Nonnull Message message,
                                                            @Nonnull Map<Tuple, List<DocumentField>> result,
                                                            @Nullable String fieldNamePrefix) {
    if (keyIndex >= keys.size()) {
        return;
    }
    KeyExpression key = keys.get(keyIndex);
    int keySize = key.getColumnSize();
    if (keyPosition + keySize <= groupingCount) {
        // Entirely in the grouping portion: extend group prefix with normal evaluation.
        List<Key.Evaluated> groups = key.evaluateMessage(rec, message);
        for (Key.Evaluated group : groups) {
            Tuple wholeGroup = groupPrefix.addAll(group.toTupleAppropriateList());
            if (groupingCount == wholeGroup.size()) {
                result.putIfAbsent(wholeGroup, new ArrayList<>());
            }
            getGroupedFields(keys, keyIndex + 1, keyPosition + key.getColumnSize(), groupingCount, wholeGroup, rec, message, result, fieldNamePrefix);
        }
        return;
    }
    if (groupingCount <= keyPosition) {
        // Entirely in the grouped portion: add fields to groups.
        List<DocumentField> fields = getFields(key, rec, message, fieldNamePrefix);
        for (Map.Entry<Tuple, List<DocumentField>> entry : result.entrySet()) {
            if (TupleHelpers.isPrefix(groupPrefix, entry.getKey())) {
                entry.getValue().addAll(fields);
            }
        }
    // Grouping ends in the middle of this key: break it apart.
    } else if (key instanceof NestingKeyExpression) {
        NestingKeyExpression nesting = (NestingKeyExpression) key;
        final String parentFieldName = nesting.getParent().getFieldName();
        for (Key.Evaluated value : nesting.getParent().evaluateMessage(rec, message)) {
            final Message submessage = (Message) value.toList().get(0);
            getGroupedFields(Collections.singletonList(nesting.getChild()), 0, keyPosition, groupingCount, groupPrefix,
                    rec, submessage, result,
                    fieldNamePrefix == null ? parentFieldName : fieldNamePrefix + "_" + parentFieldName);
        }
    } else if (key instanceof ThenKeyExpression) {
        ThenKeyExpression then = (ThenKeyExpression) key;
        getGroupedFields(then.getChildren(), 0, keyPosition, groupingCount, groupPrefix, rec, message, result, fieldNamePrefix);
    } else {
        throw new RecordCoreException("Cannot split key for document grouping: " + key);
    }
    // Continue with remaining keys.
    getGroupedFields(keys, keyIndex + 1, keyPosition + key.getColumnSize(), groupingCount, groupPrefix, rec, message, result, fieldNamePrefix);
}
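As a rough sketch of how this recursion might be seeded for a grouped index key (the surrounding variable names are assumptions, not code from the class): the top-level children of the key are passed in with keyIndex 0, keyPosition 0, an empty group prefix, and no field-name prefix.

    // Hypothetical driver (names assumed): split the grouped key into its top-level
    // children and start the recursion with an empty group prefix.
    Map<Tuple, List<DocumentField>> result = new HashMap<>();
    List<KeyExpression> topKeys = wholeKey instanceof ThenKeyExpression
            ? ((ThenKeyExpression) wholeKey).getChildren()
            : Collections.singletonList(wholeKey);
    getGroupedFields(topKeys, 0, 0, groupingCount, Tuple.from(), rec, rec.getRecord(), result, null);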