Use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
In the class OnlineIndexerSimpleTest, the method testMarkReadableClearsBuiltRanges:
@Test
public void testMarkReadableClearsBuiltRanges() {
    List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 200).mapToObj(val -> TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(val).setNumValue2((int) val + 1).build()).collect(Collectors.toList());
    Index index = new Index("newIndex", field("num_value_2").ungrouped(), IndexTypes.SUM);
    IndexAggregateFunction aggregateFunction = new IndexAggregateFunction(FunctionNames.SUM, index.getRootExpression(), index.getName());
    List<String> indexTypes = Collections.singletonList("MySimpleRecord");
    FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
    openSimpleMetaData();
    try (FDBRecordContext context = openContext()) {
        records.forEach(recordStore::saveRecord);
        context.commit();
    }
    openSimpleMetaData(hook);
    try (OnlineIndexer indexer = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
        indexer.buildIndex(true);
    }
    openSimpleMetaData(hook);
    try (FDBRecordContext context = openContext()) {
        // Verify rangeSet is cleared when index is marked readable
        final RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(index));
        AsyncIterator<Range> ranges = rangeSet.missingRanges(recordStore.ensureContextActive()).iterator();
        final Range range = ranges.next();
        final boolean range1IsEmpty = RangeSet.isFirstKey(range.begin) && RangeSet.isFinalKey(range.end);
        assertTrue(range1IsEmpty);
        // fake commit, happy compiler
        context.commit();
    }
}
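The check at the end of this test can be read in isolation: a RangeSet with no built ranges reports exactly one missing Range spanning from the first key to the final key. Below is a minimal sketch of that check as a standalone helper; the helper name and shape are ours (not from the repository), while the RangeSet and Range APIs are the ones the test itself uses, assuming the missingRanges(Transaction) overload returns an AsyncIterable as above.

import com.apple.foundationdb.Range;
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.RangeSet;
import java.util.concurrent.CompletableFuture;

// Hypothetical helper: true when nothing has been marked built, i.e. the
// single missing Range covers the whole key space tracked by the RangeSet.
static CompletableFuture<Boolean> isCompletelyUnbuilt(RangeSet rangeSet, Transaction tr) {
    return rangeSet.missingRanges(tr).asList().thenApply(ranges ->
            ranges.size() == 1
                    && RangeSet.isFirstKey(ranges.get(0).begin)
                    && RangeSet.isFinalKey(ranges.get(0).end));
}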
Use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
In the class OnlineIndexerSimpleTest, the method buildEndpointIdempotency:
@Test
public void buildEndpointIdempotency() {
    List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 10).mapToObj(val -> TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(val).setNumValue2((int) val + 1).build()).collect(Collectors.toList());
    Index index = new Index("simple$value_2", field("num_value_2").ungrouped(), IndexTypes.SUM);
    IndexAggregateFunction aggregateFunction = new IndexAggregateFunction(FunctionNames.SUM, index.getRootExpression(), index.getName());
    List<String> indexTypes = Collections.singletonList("MySimpleRecord");
    FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
    final Supplier<Tuple> getAggregate = () -> {
        Tuple ret;
        try (FDBRecordContext context = openContext()) {
            assertTrue(recordStore.uncheckedMarkIndexReadable(index.getName()).join());
            FDBRecordStore recordStore2 = recordStore.asBuilder().setContext(context).uncheckedOpen();
            ret = recordStore2.evaluateAggregateFunction(indexTypes, aggregateFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).join();
            // Do NOT commit the change.
        }
        return ret;
    };
    openSimpleMetaData();
    try (FDBRecordContext context = openContext()) {
        records.forEach(recordStore::saveRecord);
        context.commit();
    }
    openSimpleMetaData(hook);
    try (FDBRecordContext context = openContext()) {
        recordStore.markIndexWriteOnly(index).join();
        context.commit();
    }
    try (OnlineIndexer indexBuilder = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
        final RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(index));
        // Build the endpoints
        TupleRange range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(10L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(0L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(9L).pack(), null).join());
        List<Range> middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(0L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(9L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Make sure running this again doesn't change anything.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(10L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(0L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(9L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(0L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(9L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Remove the first and last records.
        try (FDBRecordContext context = openContext()) {
            recordStore.deleteRecord(Tuple.from(0L));
            recordStore.deleteRecord(Tuple.from(9L));
            context.commit();
        }
        assertEquals(Tuple.from(0L), getAggregate.get());
        // Rerun endpoints with new data.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(1L), range.getLow());
        assertEquals(Tuple.from(8L), range.getHigh());
        assertEquals(Tuple.from(9L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Run it again to show that nothing has happened.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(1L), range.getLow());
        assertEquals(Tuple.from(8L), range.getHigh());
        assertEquals(Tuple.from(9L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Add back the previous first and last records.
        try (FDBRecordContext context = openContext()) {
            recordStore.saveRecord(TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(0L).setNumValue2(1).build());
            recordStore.saveRecord(TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(9L).setNumValue2(10).build());
            context.commit();
        }
        assertEquals(Tuple.from(20L), getAggregate.get());
        // Rerun endpoints with new data.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(20L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Run it again to show that nothing has happened.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(20L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Straight up build the whole index.
        indexBuilder.buildIndex(false);
        assertEquals(Tuple.from(55L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database()).join());
    }
}
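The same three-part assertion recurs after every buildEndpoints() call above: nothing is missing below the low endpoint, nothing is missing at or above the high endpoint, and exactly one missing range [low, high) remains in the middle. A hedged sketch of a helper that would capture that pattern (the helper is ours, not in the test; the RangeSet overloads are the ones the test calls):

// Hypothetical helper consolidating the repeated missing-range assertions.
static void assertOnlyMiddleMissing(RangeSet rangeSet, Database db, Tuple low, Tuple high) {
    // Everything strictly below low is built.
    assertEquals(Collections.emptyList(), rangeSet.missingRanges(db, null, low.pack()).join());
    // Everything at or above high is built.
    assertEquals(Collections.emptyList(), rangeSet.missingRanges(db, high.pack(), null).join());
    // Exactly one unbuilt range remains: [low, high).
    List<Range> middle = rangeSet.missingRanges(db).join();
    assertEquals(1, middle.size());
    assertEquals(low, Tuple.fromBytes(middle.get(0).begin));
    assertEquals(high, Tuple.fromBytes(middle.get(0).end));
}

With such a helper, each idempotency check would reduce to a single call, e.g. assertOnlyMiddleMissing(rangeSet, fdb.database(), Tuple.from(1L), Tuple.from(8L)).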
Use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
In the class HighContentionAllocatorTest, the method testCheckForRootConflicts:
@Test
@Tag(Tags.WipesFDB)
void testCheckForRootConflicts() {
    Range everything = new Range(new byte[] { (byte) 0x00 }, new byte[] { (byte) 0xFF });
    database.run(context -> {
        context.ensureActive().clear(everything);
        return null;
    });
    try (FDBRecordContext context = database.openContext()) {
        HighContentionAllocator hca = HighContentionAllocator.forRoot(context, keySpace.path("test-path"));
        // so write something for all of those keys
        for (int i = 0; i < 64; i++) {
            byte[] key = Tuple.from(i, "string-" + i).pack();
            byte[] value = new byte[0];
            context.ensureActive().set(key, value);
        }
        try {
            hca.allocate("some-string").join();
            fail("allocate should fail in the same transaction");
        } catch (Exception e) {
assertThat("a", e.getCause().getMessage(), is("database already has keys in allocation range"));
        }
        // check that the hca marks these keys as invalid
        // the thing here is that when the post allocation hook fails the allocator still writes a key,
        // that's actually a good thing since it will prevent us from trying to allocate that key again
        // but we need to make sure we have a way to exclude it from the reverse lookup
        Subspace allocationSubspace = hca.getAllocationSubspace();
        Range initialWindow = new Range(allocationSubspace.getKey(), allocationSubspace.pack(64));
        List<KeyValue> allocatedValues = context.ensureActive().getRange(initialWindow).asList().join();
        assertThat("there's exactly one allocation key", allocatedValues, hasSize(1));
        byte[] valueBytes = allocatedValues.get(0).getValue();
        assertArrayEquals(new byte[] { (byte) 0xFD }, valueBytes, "the value is set to the magic byte");
        context.commit();
    }
    try (FDBRecordContext context = database.openContext()) {
        HighContentionAllocator hca = HighContentionAllocator.forRoot(context, keySpace.path("test-path"));
        try {
            hca.allocate("some-string").join();
            fail("allocate should fail in new transaction");
        } catch (Exception e) {
assertThat("a", e.getCause().getMessage(), is("database already has keys in allocation range"));
        }
    }
    database.run(context -> {
        context.ensureActive().clear(everything);
        return null;
    });
}
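Both @Tag(Tags.WipesFDB) tests in this section bracket their work with the same clear-everything transaction over a Range spanning all user keys (begin 0x00 inclusive, end 0xFF exclusive, so the \xFF system keyspace is untouched). As a standalone sketch (the method name is ours, and we assume the test's database field is an FDBDatabase as in the record layer test base), run only against a disposable database:

// Destructive: clears every user key. Intended only for test databases.
static void wipeUserKeys(FDBDatabase database) {
    Range everything = new Range(new byte[] { (byte) 0x00 }, new byte[] { (byte) 0xFF });
    database.run(context -> {
        context.ensureActive().clear(everything);
        return null;
    });
}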
Use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
In the class StringInterningLayerTest, the method testFilterInvalidAllocationValues:
@Test
@Tag(Tags.WipesFDB)
void testFilterInvalidAllocationValues() {
    Range everything = new Range(new byte[] { (byte) 0x00 }, new byte[] { (byte) 0xFF });
    database.run(context -> {
        context.ensureActive().clear(everything);
        return null;
    });
    try (FDBRecordContext context = database.openContext()) {
        StringInterningLayer interningLayer = new StringInterningLayer(testSubspace, true);
        // so write something for all of those keys
        for (int i = 0; i < 64; i++) {
            byte[] key = Tuple.from(i, "string-" + i).pack();
            byte[] value = new byte[0];
            context.ensureActive().set(key, value);
        }
        try {
            interningLayer.intern(context, "a-string").join();
            fail("intern should throw exception");
        } catch (Exception e) {
assertThat("a", e.getCause().getMessage(), is("database already has keys in allocation range"));
        }
        Optional<ResolverResult> maybeRead = interningLayer.read(context, "a-string").join();
        assertThat("we don't read anything", maybeRead, is(Optional.empty()));
    }
}
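For contrast, a hedged sketch of the happy path this test guards against corrupting: with a clean testSubspace, intern() allocates a value and a subsequent read() finds it. This assumes intern() resolves to the allocated ResolverResult and that ResolverResult implements value equality; both are assumptions about the API, not confirmed by the snippet above.

try (FDBRecordContext context = database.openContext()) {
    StringInterningLayer interningLayer = new StringInterningLayer(testSubspace, true);
    // Allocate an interned value for the string, then read it back.
    ResolverResult interned = interningLayer.intern(context, "a-string").join();
    Optional<ResolverResult> maybeRead = interningLayer.read(context, "a-string").join();
    assertEquals(Optional.of(interned), maybeRead);
}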
Use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
In the class FDBRecordStoreTest, the method prefixPrimaryKeysWithNullByteAfterPrefix:
/**
 * If there are two primary keys that overlap in just the right way, then one can have the representation
 * of one record's primary key be a strict prefix of the other record's. With older versions of the Record Layer,
 * this could lead to problems where attempting to load the record with the shorter key would also read data for
 * the record with the longer key. See <a href="https://github.com/FoundationDB/fdb-record-layer/issues/782">Issue #782</a>.
 *
 * <p>
 * This test is parameterized by format version because the fix involves being more particular about the range that
 * is scanned. In particular, the scan range is now only over those keys which are strict prefixes of the primary
 * key. This is fine as long as there isn't any data stored at that key itself. Prior to {@link FDBRecordStore#SAVE_UNSPLIT_WITH_SUFFIX_FORMAT_VERSION},
 * there could be data at that key if {@code splitLongRecords} was {@code false}. Parameterizing here
 * tests those older configurations.
 * </p>
 *
 * @param formatVersion format version to use when running the test
 * @param splitLongRecords whether the test should split long records or not
 */
@ParameterizedTest(name = "prefixPrimaryKeysWithNullByteAfterPrefix [formatVersion = {0}, splitLongRecords = {1}]")
@MethodSource("formatVersionAndSplitArgs")
public void prefixPrimaryKeysWithNullByteAfterPrefix(int formatVersion, boolean splitLongRecords) throws Exception {
    final RecordMetaDataHook hook = metaData -> {
        metaData.setSplitLongRecords(splitLongRecords);
        metaData.getRecordType("MySimpleRecord").setPrimaryKey(field("str_value_indexed"));
    };
    final FDBRecordStore.Builder storeBuilder;
    // The primary key for record1 is a prefix of the primary key for record2, and the first byte in record2's primary
    // key after that prefix is a null byte. Because FDB's Tuple layer null-terminates its representation of strings,
    // the keys used to store record1 form a prefix of the keys storing record2. However, the null byte should be
    // escaped in such a way that it is possible to read only the keys for record1. This is necessary to properly load
    // the record by primary key.
    final TestRecords1Proto.MySimpleRecord record1 = TestRecords1Proto.MySimpleRecord.newBuilder().setStrValueIndexed("foo").setNumValue3Indexed(1066).build();
    final TestRecords1Proto.MySimpleRecord record2 = TestRecords1Proto.MySimpleRecord.newBuilder().setStrValueIndexed("foo\0bar").setNumValue3Indexed(1415).build();
    // Save the two records
    try (FDBRecordContext context = openContext()) {
        uncheckedOpenSimpleRecordStore(context, hook);
        storeBuilder = recordStore.asBuilder().setFormatVersion(formatVersion);
        final FDBRecordStore store = storeBuilder.create();
        store.saveRecord(record1);
        store.saveRecord(record2);
        commit(context);
    }
    // Load by scanning records
    try (FDBRecordContext context = openContext()) {
        final FDBRecordStore store = storeBuilder.setContext(context).open();
        final List<FDBStoredRecord<Message>> records = store.scanRecords(null, ScanProperties.FORWARD_SCAN).asList().get();
        assertThat(records, hasSize(2));
        assertEquals(record1, records.get(0).getRecord());
        assertEquals(record2, records.get(1).getRecord());
    }
    // Load by primary key
    try (FDBRecordContext context = openContext()) {
        final FDBRecordStore store = storeBuilder.setContext(context).open();
        FDBStoredRecord<Message> storedRecord1 = store.loadRecord(Tuple.from(record1.getStrValueIndexed()));
        assertNotNull(storedRecord1, "record1 was missing");
        assertEquals(record1, storedRecord1.getRecord());
        FDBStoredRecord<Message> storedRecord2 = store.loadRecord(Tuple.from(record2.getStrValueIndexed()));
        assertNotNull(storedRecord2, "record2 was missing");
        assertEquals(record2, storedRecord2.getRecord());
    }
    // Load by query
    try (FDBRecordContext context = openContext()) {
        final FDBRecordStore store = storeBuilder.setContext(context).open();
        final RecordQuery query = RecordQuery.newBuilder().setRecordType("MySimpleRecord").setFilter(Query.field("num_value_3_indexed").equalsParameter("num")).build();
        final RecordQueryPlan plan = store.planQuery(query);
        assertThat(plan, indexScan(allOf(indexName("MySimpleRecord$num_value_3_indexed"), bounds(hasTupleString("[EQUALS $num]")))));
        // Record 1
        List<FDBQueriedRecord<Message>> record1List = plan.execute(store, EvaluationContext.forBinding("num", 1066)).asList().get();
        assertThat(record1List, hasSize(1));
        FDBQueriedRecord<Message> queriedRecord1 = record1List.get(0);
        assertEquals(record1, queriedRecord1.getRecord());
        // Record 2
        List<FDBQueriedRecord<Message>> record2List = plan.execute(store, EvaluationContext.forBinding("num", 1415)).asList().get();
        assertThat(record2List, hasSize(1));
        FDBQueriedRecord<Message> queriedRecord2 = record2List.get(0);
        assertEquals(record2, queriedRecord2.getRecord());
    }
}
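The null-byte interaction the comments above describe can be seen directly in the Tuple encoding: a string packs as the type code 0x02, its UTF-8 bytes, and a 0x00 terminator, with any embedded null escaped as 0x00 0xFF. So the packed form of "foo" is a strict byte prefix of the packed form of "foo\0bar", which is exactly why the primary-key scan range must exclude longer keys. A minimal self-contained illustration:

import com.apple.foundationdb.tuple.Tuple;
import java.util.Arrays;

public class TuplePrefixDemo {
    public static void main(String[] args) {
        byte[] key1 = Tuple.from("foo").pack();      // 02 66 6F 6F 00
        byte[] key2 = Tuple.from("foo\0bar").pack(); // 02 66 6F 6F 00 FF 62 61 72 00
        // key1 is a strict byte prefix of key2: the embedded null in the second
        // string is escaped as 00 FF, so it cannot be confused with the terminator.
        boolean strictPrefix = key2.length > key1.length
                && Arrays.equals(key1, Arrays.copyOfRange(key2, 0, key1.length));
        System.out.println(strictPrefix); // true
    }
}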