Use of com.apple.foundationdb.tuple.Tuple in project fdb-record-layer by FoundationDB.
Class RecordMetaDataBuilderTest, method testGetRecordTypeKeyTuple.
@Test
public void testGetRecordTypeKeyTuple() {
    RecordMetaDataBuilder metaDataBuilder = RecordMetaData.newBuilder().setRecords(TestRecords1Proto.getDescriptor());
    RecordMetaData metaData = metaDataBuilder.build(false);
    Tuple t = metaData.getRecordType("MySimpleRecord").getRecordTypeKeyTuple();
    assertEquals(1, t.size());
    assertEquals(1, t.getLong(0));
}
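The assertions above exercise only the generic Tuple accessors. Below is a minimal, standalone sketch of the same checks against a hand-built single-element tuple; the class name and the literal 1L are illustrative stand-ins for the record type key that RecordMetaData assigns, not part of the test above.

import com.apple.foundationdb.tuple.Tuple;

public class RecordTypeKeyTupleSketch {
    public static void main(String[] args) {
        // Illustrative stand-in for metaData.getRecordType("MySimpleRecord").getRecordTypeKeyTuple():
        // a single-element tuple holding the long 1.
        Tuple recordTypeKey = Tuple.from(1L);

        System.out.println(recordTypeKey.size());     // 1
        System.out.println(recordTypeKey.getLong(0)); // 1

        // Tuples round-trip through their packed byte representation.
        Tuple unpacked = Tuple.fromBytes(recordTypeKey.pack());
        System.out.println(unpacked.equals(recordTypeKey)); // true
    }
}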
Use of com.apple.foundationdb.tuple.Tuple in project fdb-record-layer by FoundationDB.
Class FDBRecordStoreIndexTest, method testNoBoundaryPrimaryKeysImpl.
public void testNoBoundaryPrimaryKeysImpl() {
    final FDBDatabaseFactory factory = FDBDatabaseFactory.instance();
    factory.setLocalityProvider(MockedLocalityUtil.instance());
    FDBDatabase database = FDBDatabaseFactory.instance().getDatabase();
    final String indexName = "MySimpleRecord$num_value_unique";
    try (FDBRecordContext context = database.openContext()) {
        openSimpleRecordStore(context, TEST_SPLIT_HOOK);
        recordStore.markIndexWriteOnly(indexName).join();
        commit(context);
    }
    String bigOlString = Strings.repeat("x", SplitHelper.SPLIT_RECORD_SIZE + 2);
    saveAndSplitSimpleRecord(1, bigOlString, 1);
    saveAndSplitSimpleRecord(2, bigOlString, 2);
    OnlineIndexer indexer;
    List<Tuple> boundaryPrimaryKeys;
    TupleRange range;
    try (FDBRecordContext context = database.openContext()) {
        openSimpleRecordStore(context, TEST_SPLIT_HOOK);
        Index index = recordStore.getRecordMetaData().getIndex(indexName);
        // The indexer only uses recordStore as a prototype, so it does not require that the original
        // record store still be active.
        indexer = OnlineIndexer.newBuilder().setDatabase(database).setRecordStore(recordStore).setIndex(index).build();
        range = recordStore.context.asyncToSync(FDBStoreTimer.Waits.WAIT_BUILD_ENDPOINTS, indexer.buildEndpoints());
        logger.info("The endpoints are " + range);
        MockedLocalityUtil.init(new ArrayList<>(Arrays.asList(recordStore.recordsSubspace().pack(1), recordStore.recordsSubspace().pack(2))), 0);
        boundaryPrimaryKeys = recordStore.context.asyncToSync(FDBStoreTimer.Waits.WAIT_GET_BOUNDARY, recordStore.getPrimaryKeyBoundaries(range.getLow(), range.getHigh()).asList());
        assertEquals(0, boundaryPrimaryKeys.size());
        logger.info("The boundary primary keys are " + boundaryPrimaryKeys);
        commit(context);
    }
    // Test splitIndexBuildRange.
    assertEquals(1, indexer.splitIndexBuildRange(Integer.MAX_VALUE, Integer.MAX_VALUE).size());
    indexer.close();
    database.close();
}
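For intuition about what the boundary list is used for: a sorted list of boundary primary keys carves the overall endpoint range into contiguous sub-ranges that can be indexed in parallel. The standalone sketch below shows that mechanic using only Tuple and java.util; the class and the splitByBoundaries helper are hypothetical, not part of the Record Layer API used above.

import com.apple.foundationdb.tuple.Tuple;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class BoundarySplitSketch {
    // Splits [low, high] at each boundary key; with no boundaries the whole range stays as one piece.
    static List<Tuple[]> splitByBoundaries(Tuple low, Tuple high, List<Tuple> boundaries) {
        List<Tuple[]> ranges = new ArrayList<>();
        Tuple begin = low;
        for (Tuple boundary : boundaries) {
            ranges.add(new Tuple[]{begin, boundary});
            begin = boundary;
        }
        ranges.add(new Tuple[]{begin, high});
        return ranges;
    }

    public static void main(String[] args) {
        Tuple low = Tuple.from(1L);
        Tuple high = Tuple.from(2L);
        // No boundaries (as asserted in the test above): one undivided range.
        System.out.println(splitByBoundaries(low, high, Collections.emptyList()).size()); // 1
        // One boundary in between: two contiguous sub-ranges.
        System.out.println(splitByBoundaries(low, high, Collections.singletonList(Tuple.from(1L, 5L))).size()); // 2
    }
}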
Use of com.apple.foundationdb.tuple.Tuple in project fdb-record-layer by FoundationDB.
Class FDBRecordStoreIndexTest, method testBoundaryPrimaryKeysImpl.
public void testBoundaryPrimaryKeysImpl() {
    final FDBDatabaseFactory factory = FDBDatabaseFactory.instance();
    factory.setLocalityProvider(MockedLocalityUtil.instance());
    FDBDatabase database = FDBDatabaseFactory.instance().getDatabase();
    final String indexName = "MySimpleRecord$num_value_unique";
    try (FDBRecordContext context = database.openContext()) {
        openSimpleRecordStore(context, TEST_SPLIT_HOOK);
        recordStore.markIndexWriteOnly(indexName).join();
        commit(context);
    }
    ArrayList<byte[]> keys = new ArrayList<>();
    String bigOlString = Strings.repeat("x", SplitHelper.SPLIT_RECORD_SIZE + 2);
    for (int i = 0; i < 50; i++) {
        saveAndSplitSimpleRecord(i, bigOlString, i);
        keys.add(recordStore.recordsSubspace().pack(i));
    }
    OnlineIndexer indexer;
    List<Tuple> boundaryPrimaryKeys;
    TupleRange range;
    try (FDBRecordContext context = database.openContext()) {
        openSimpleRecordStore(context, TEST_SPLIT_HOOK);
        // 3 <= rangeCount <= size
        MockedLocalityUtil.init(keys, new Random().nextInt(keys.size() - 2) + 3);
        Index index = recordStore.getRecordMetaData().getIndex(indexName);
        // The indexer only uses recordStore as a prototype, so it does not require that the original
        // record store still be active.
        indexer = OnlineIndexer.newBuilder().setDatabase(database).setRecordStore(recordStore).setIndex(index).build();
        range = recordStore.context.asyncToSync(FDBStoreTimer.Waits.WAIT_BUILD_ENDPOINTS, indexer.buildEndpoints());
        logger.info("The endpoints are " + range);
        boundaryPrimaryKeys = recordStore.context.asyncToSync(FDBStoreTimer.Waits.WAIT_GET_BOUNDARY, recordStore.getPrimaryKeyBoundaries(range.getLow(), range.getHigh()).asList());
        logger.info("The boundary primary keys are " + boundaryPrimaryKeys);
        commit(context);
    }
    int boundaryPrimaryKeysSize = boundaryPrimaryKeys.size();
    assertTrue(boundaryPrimaryKeysSize > 2, "the test is meaningless if the records are not across boundaries");
    assertThat(boundaryPrimaryKeys.get(0), greaterThanOrEqualTo(Tuple.from(-25L * 39)));
    assertThat(boundaryPrimaryKeys.get(boundaryPrimaryKeysSize - 1), lessThanOrEqualTo(Tuple.from(24L * 39)));
    assertEquals(boundaryPrimaryKeys.stream().sorted().distinct().collect(Collectors.toList()), boundaryPrimaryKeys, "the list should be sorted without duplication.");
    for (Tuple boundaryPrimaryKey : boundaryPrimaryKeys) {
        assertEquals(1, boundaryPrimaryKey.size(), "primary keys should be a single value");
    }
    // Test splitIndexBuildRange.
    assertEquals(1, indexer.splitIndexBuildRange(Integer.MAX_VALUE, Integer.MAX_VALUE).size(), "the range is not split when it cannot be split to at least minSplit ranges");
    // to test splitting into fewer than the default number of split points
    checkSplitIndexBuildRange(1, 2, null, indexer);
    // to test splitting into fewer than the default number of split points
    checkSplitIndexBuildRange(boundaryPrimaryKeysSize / 2, boundaryPrimaryKeysSize - 2, null, indexer);
    checkSplitIndexBuildRange(boundaryPrimaryKeysSize / 2, boundaryPrimaryKeysSize, null, indexer);
    List<Pair<Tuple, Tuple>> oneRangePerSplit = getOneRangePerSplit(range, boundaryPrimaryKeys);
    // to test exactly one range for each split
    checkSplitIndexBuildRange(boundaryPrimaryKeysSize / 2, boundaryPrimaryKeysSize + 1, oneRangePerSplit, indexer);
    checkSplitIndexBuildRange(boundaryPrimaryKeysSize / 2, boundaryPrimaryKeysSize + 2, oneRangePerSplit, indexer);
    // to test that integer overflow isn't a problem
    checkSplitIndexBuildRange(boundaryPrimaryKeysSize / 2, Integer.MAX_VALUE, oneRangePerSplit, indexer);
    indexer.close();
    database.close();
}
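The sorted-and-distinct assertion above works because Tuple implements Comparable and sorts in the same order as its packed byte encoding, which is what makes boundary primary keys usable as range split points. A small standalone illustration follows; the class name is just for the sketch and the values are arbitrary, chosen to echo the bounds asserted above.

import com.apple.foundationdb.tuple.Tuple;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class TupleOrderingSketch {
    public static void main(String[] args) {
        // Single-element primary keys, deliberately out of order.
        List<Tuple> keys = Arrays.asList(Tuple.from(24L * 39), Tuple.from(-25L * 39), Tuple.from(0L));

        List<Tuple> sorted = keys.stream().sorted().distinct().collect(Collectors.toList());
        System.out.println(sorted); // ascending: -975, 0, 936

        // Tuple comparison agrees with unsigned lexicographic order of the packed bytes.
        System.out.println(sorted.get(0).compareTo(sorted.get(1)) < 0); // true
    }
}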
Use of com.apple.foundationdb.tuple.Tuple in project fdb-record-layer by FoundationDB.
Class FDBRecordStoreIndexTest, method minMaxOptional.
@ParameterizedTest(name = "minMaxLongOptional({0})")
@EnumSource(MinMaxIndexTypes.class)
public void minMaxOptional(MinMaxIndexTypes indexTypes) throws Exception {
    final KeyExpression key = field("num_value_3_indexed").ungrouped();
    final RecordMetaDataHook hook = md -> {
        RecordTypeBuilder type = md.getRecordType("MySimpleRecord");
        md.addIndex(type, new Index("min", key, indexTypes.min()));
        md.addIndex(type, new Index("max", key, indexTypes.max()));
    };
    final IndexAggregateFunction minOverall = new IndexAggregateFunction(FunctionNames.MIN_EVER, key, null);
    final IndexAggregateFunction maxOverall = new IndexAggregateFunction(FunctionNames.MAX_EVER, key, null);
    List<String> types = Collections.singletonList("MySimpleRecord");
    try (FDBRecordContext context = openContext()) {
        openSimpleRecordStore(context, hook);
        TestRecords1Proto.MySimpleRecord.Builder recBuilder = TestRecords1Proto.MySimpleRecord.newBuilder();
        recBuilder.setRecNo(1066L);
        recordStore.saveRecord(recBuilder.build());
        commit(context);
    }
    try (FDBRecordContext context = openContext()) {
        openSimpleRecordStore(context, hook);
        final Tuple expected = indexTypes == MinMaxIndexTypes.TUPLE ? Tuple.fromList(Collections.singletonList(null)) : null;
        assertEquals(expected, recordStore.evaluateAggregateFunction(types, minOverall, Key.Evaluated.EMPTY, IsolationLevel.SNAPSHOT).join());
        assertEquals(expected, recordStore.evaluateAggregateFunction(types, maxOverall, Key.Evaluated.EMPTY, IsolationLevel.SNAPSHOT).join());
        commit(context);
    }
}
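The Tuple-specific subtlety above is that a tuple can contain null as an element, which is different from the aggregate result itself being null (the non-TUPLE index types in this test). A tiny standalone illustration; the class name exists only for the sketch.

import com.apple.foundationdb.tuple.Tuple;
import java.util.Collections;

public class NullElementTupleSketch {
    public static void main(String[] args) {
        // A one-element tuple whose single element is null, as the TUPLE case expects above.
        Tuple nullElement = Tuple.fromList(Collections.singletonList(null));

        System.out.println(nullElement.size());    // 1
        System.out.println(nullElement.get(0));    // null
        System.out.println(nullElement.isEmpty()); // false: there is an element, the element is just null
    }
}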
Use of com.apple.foundationdb.tuple.Tuple in project fdb-record-layer by FoundationDB.
Class FDBRecordStoreRepairTest, method mutateRecordKeys.
private void mutateRecordKeys(Function<Tuple, Tuple> mutator) throws Exception {
    try (FDBRecordContext context = openContext()) {
        final Transaction tr = context.ensureActive();
        openUnsplitRecordStore(context);
        RecordCursor<KeyValue> cursor = RecordCursor.fromIterator(tr.getRange(recordStore.recordsSubspace().range()).iterator());
        cursor.forEach(keyValue -> {
            Tuple keyTuple = Tuple.fromBytes(keyValue.getKey());
            long suffix = keyTuple.getLong(keyTuple.size() - 1);
            // Skip record versions
            if (suffix != SplitHelper.RECORD_VERSION) {
                Tuple mutatedKey = mutator.apply(keyTuple);
                if (!mutatedKey.equals(keyTuple)) {
                    tr.clear(keyValue.getKey());
                    tr.set(mutatedKey.pack(), keyValue.getValue());
                }
            }
        }).get();
        commit(context);
    }
}
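To make the mutator parameter concrete, here is a hypothetical Function<Tuple, Tuple> of the kind that could be handed to mutateRecordKeys, written against the public Tuple API only (popBack and add); the transformation and the sample key are illustrative and not taken from the test class.

import com.apple.foundationdb.tuple.Tuple;
import java.util.function.Function;

public class KeyMutatorSketch {
    public static void main(String[] args) {
        // Example mutator: bump the split suffix (the last tuple element) by one.
        Function<Tuple, Tuple> bumpSuffix = keyTuple ->
                keyTuple.popBack().add(keyTuple.getLong(keyTuple.size() - 1) + 1);

        // Simplified stand-in for a record key tuple: (primary key, split suffix).
        Tuple recordKey = Tuple.from(1066L, 0L);
        Tuple mutatedKey = bumpSuffix.apply(recordKey);

        System.out.println(mutatedKey);                   // (1066, 1)
        System.out.println(mutatedKey.equals(recordKey)); // false, so mutateRecordKeys would rewrite this key
    }
}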