Use of com.apple.foundationdb.tuple.Tuple in the project fdb-record-layer by FoundationDB.
From the class FDBMetaDataStoreTest, method historyCompat:
/**
 * Verifies compatibility with the old meta-data storage layout: meta-data saved under
 * {@code CURRENT_KEY} is manually moved up to the subspace root (mimicking the old format),
 * the store is re-opened and read, a newer meta-data version is saved on top, and finally the
 * older version is re-loaded via {@code loadVersion} and checked.
 */
@Test
public void historyCompat() {
try (FDBRecordContext context = fdb.openContext()) {
openMetaDataStore(context);
// Build and save meta-data version 101: two record types keyed by rec_no, no indexes yet.
RecordMetaDataProto.MetaData.Builder metaData = RecordMetaDataProto.MetaData.newBuilder();
metaData.setRecords(TestRecords1Proto.getDescriptor().toProto());
metaData.addRecordTypesBuilder().setName("MySimpleRecord").getPrimaryKeyBuilder().getFieldBuilder().setFieldName("rec_no").setFanType(RecordMetaDataProto.Field.FanType.SCALAR);
metaData.addRecordTypesBuilder().setName("MyOtherRecord").getPrimaryKeyBuilder().getFieldBuilder().setFieldName("rec_no").setFanType(RecordMetaDataProto.Field.FanType.SCALAR);
metaData.setVersion(101);
metaDataStore.saveRecordMetaData(metaData.build());
{
// Adjust to look like old format store by moving everything under CURRENT_KEY up under root.
Transaction tr = context.ensureActive();
List<KeyValue> kvs = context.asyncToSync(FDBStoreTimer.Waits.WAIT_LOAD_META_DATA, tr.getRange(metaDataStore.getSubspace().range(FDBMetaDataStore.CURRENT_KEY)).asList());
context.ensureActive().clear(metaDataStore.getSubspace().range());
for (KeyValue kv : kvs) {
Tuple tuple = Tuple.fromBytes(kv.getKey());
List<Object> items = tuple.getItems();
// Drop the next-to-last tuple element (presumably the CURRENT_KEY component — confirm against
// FDBMetaDataStore's key layout); it is expected to be null here.
assertEquals(null, items.remove(items.size() - 2));
tuple = Tuple.fromList(items);
tr.set(tuple.pack(), kv.getValue());
}
}
context.commit();
}
// Re-open the store: it should read the old-format layout transparently.
RecordMetaData before;
try (FDBRecordContext context = fdb.openContext()) {
openMetaDataStore(context);
before = metaDataStore.getRecordMetaData();
context.commit();
}
assertNotNull(before.getRecordType("MySimpleRecord"));
assertFalse(before.hasIndex("MyIndex"));
// Save meta-data version 102, which adds MyIndex on MySimpleRecord.num_value_2.
try (FDBRecordContext context = fdb.openContext()) {
openMetaDataStore(context);
RecordMetaDataProto.MetaData.Builder metaData = RecordMetaDataProto.MetaData.newBuilder();
metaData.setRecords(TestRecords1Proto.getDescriptor().toProto());
metaData.addRecordTypesBuilder().setName("MySimpleRecord").getPrimaryKeyBuilder().getFieldBuilder().setFieldName("rec_no").setFanType(RecordMetaDataProto.Field.FanType.SCALAR);
metaData.addIndexesBuilder().setName("MyIndex").addRecordType("MySimpleRecord").setAddedVersion(102).setLastModifiedVersion(102).getRootExpressionBuilder().getFieldBuilder().setFieldName("num_value_2").setFanType(RecordMetaDataProto.Field.FanType.SCALAR);
metaData.addRecordTypesBuilder().setName("MyOtherRecord").getPrimaryKeyBuilder().getFieldBuilder().setFieldName("rec_no").setFanType(RecordMetaDataProto.Field.FanType.SCALAR);
metaData.setVersion(102);
metaDataStore.saveRecordMetaData(metaData.build());
context.commit();
}
// The current meta-data should now be version 102 with the index present.
RecordMetaData after;
try (FDBRecordContext context = fdb.openContext()) {
openMetaDataStore(context);
after = metaDataStore.getRecordMetaData();
context.commit();
}
assertNotNull(after.getRecordType("MySimpleRecord"));
assertTrue(after.hasIndex("MyIndex"));
// The historical version 101 should still be loadable and still lack MyIndex.
RecordMetaData beforeAgain;
try (FDBRecordContext context = fdb.openContext()) {
openMetaDataStore(context);
beforeAgain = context.asyncToSync(FDBStoreTimer.Waits.WAIT_LOAD_META_DATA, metaDataStore.loadVersion(before.getVersion()));
context.commit();
}
assertEquals(before.getVersion(), beforeAgain.getVersion());
assertNotNull(beforeAgain.getRecordType("MySimpleRecord"));
assertFalse(beforeAgain.hasIndex("MyIndex"));
}
Use of com.apple.foundationdb.tuple.Tuple in the project fdb-record-layer by FoundationDB.
From the class OnlineIndexerBuildIndexTest, method singleRebuild:
/**
 * Drives a full online-index-build scenario: inserts {@code records}, runs {@code beforeBuild},
 * adds {@code index} through the meta-data hook, builds it with {@code agents} parallel workers
 * (optionally saving {@code recordsWhileBuilding} while the build is in flight), then validates
 * scan counts and range-set emptiness, marks the index readable, and runs {@code afterReadable}.
 *
 * <p>Fixes relative to the previous revision: removed a redundant duplicate call to
 * {@code openSimpleMetaData(hook)}, and the range-set dump now goes to the logger instead of
 * {@code System.out}.
 *
 * @param records              records saved before the build begins
 * @param recordsWhileBuilding records saved while the build is running, or {@code null} for none
 * @param agents               number of concurrent build workers
 * @param overlap              if {@code true}, all agents build the full range concurrently;
 *                             otherwise the key range is partitioned between them
 * @param splitLongRecords     whether the store is configured to split long records
 * @param index                the index under test
 * @param beforeBuild          hook run before the index is added to the meta-data
 * @param afterBuild           hook run after the build while the index is still write-only
 * @param afterReadable        hook run after the index has been marked readable
 */
void singleRebuild(@Nonnull List<TestRecords1Proto.MySimpleRecord> records, @Nullable List<TestRecords1Proto.MySimpleRecord> recordsWhileBuilding, int agents, boolean overlap, boolean splitLongRecords, @Nonnull Index index, @Nonnull Runnable beforeBuild, @Nonnull Runnable afterBuild, @Nonnull Runnable afterReadable) {
    LOGGER.info(KeyValueLogMessage.of("beginning rebuild test", TestLogMessageKeys.RECORDS, records.size(), LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(), TestLogMessageKeys.AGENTS, agents, TestLogMessageKeys.OVERLAP, overlap, TestLogMessageKeys.SPLIT_LONG_RECORDS, splitLongRecords, TestLogMessageKeys.INDEX, index));
    final FDBStoreTimer timer = new FDBStoreTimer();
    final FDBRecordStoreTestBase.RecordMetaDataHook onlySplitHook = metaDataBuilder -> {
        if (splitLongRecords) {
            metaDataBuilder.setSplitLongRecords(true);
            metaDataBuilder.removeIndex("MySimpleRecord$str_value_indexed");
        }
    };
    final FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> {
        onlySplitHook.apply(metaDataBuilder);
        metaDataBuilder.addIndex("MySimpleRecord", index);
    };
    LOGGER.info(KeyValueLogMessage.of("inserting elements prior to test", TestLogMessageKeys.RECORDS, records.size()));
    openSimpleMetaData(onlySplitHook);
    try (FDBRecordContext context = openContext()) {
        for (TestRecords1Proto.MySimpleRecord record : records) {
            // Check presence first to avoid overwriting version information of previously added records.
            Tuple primaryKey = Tuple.from(record.getRecNo());
            if (recordStore.loadRecord(primaryKey) == null) {
                recordStore.saveRecord(record);
            }
        }
        context.commit();
    }
    LOGGER.info(KeyValueLogMessage.of("running before build for test"));
    beforeBuild.run();
    LOGGER.info(KeyValueLogMessage.of("adding index", TestLogMessageKeys.INDEX, index));
    openSimpleMetaData(hook);
    final boolean isAlwaysReadable;
    try (FDBRecordContext context = openContext()) {
        // For safe builds, clearing/marking the index write-only is (presumably) taken
        // care of by OnlineIndexer itself, so it is only done explicitly here otherwise.
        if (!safeBuild) {
            LOGGER.info(KeyValueLogMessage.of("marking write-only", TestLogMessageKeys.INDEX, index));
            recordStore.clearAndMarkIndexWriteOnly(index).join();
        }
        isAlwaysReadable = safeBuild && recordStore.isIndexReadable(index);
        context.commit();
    }
    LOGGER.info(KeyValueLogMessage.of("creating online index builder", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.RECORD_TYPES, metaData.recordTypesForIndex(index), LogMessageKeys.SUBSPACE, ByteArrayUtil2.loggable(subspace.pack()), LogMessageKeys.LIMIT, 20, TestLogMessageKeys.RECORDS_PER_SECOND, OnlineIndexer.DEFAULT_RECORDS_PER_SECOND * 100));
    final OnlineIndexer.Builder builder = OnlineIndexer.newBuilder()
            .setDatabase(fdb)
            .setMetaData(metaData)
            .setIndex(index)
            .setSubspace(subspace)
            .setConfigLoader(old -> {
                OnlineIndexer.Config.Builder conf = OnlineIndexer.Config.newBuilder()
                        .setMaxLimit(20)
                        .setMaxRetries(Integer.MAX_VALUE)
                        .setRecordsPerSecond(OnlineIndexer.DEFAULT_RECORDS_PER_SECOND * 100);
                if (ThreadLocalRandom.current().nextBoolean()) {
                    // randomly enable the progress logging to ensure that it doesn't throw exceptions,
                    // or otherwise disrupt the build.
                    LOGGER.info("Setting progress log interval");
                    conf.setProgressLogIntervalMillis(0);
                }
                return conf.build();
            })
            .setTimer(timer);
    if (ThreadLocalRandom.current().nextBoolean()) {
        LOGGER.info("Setting priority to DEFAULT");
        builder.setPriority(FDBTransactionPriority.DEFAULT);
    }
    if (fdb.isTrackLastSeenVersion()) {
        LOGGER.info("Setting weak read semantics");
        builder.setWeakReadSemantics(new FDBDatabase.WeakReadSemantics(0L, Long.MAX_VALUE, true));
    }
    if (!safeBuild) {
        builder.setIndexingPolicy(OnlineIndexer.IndexingPolicy.newBuilder().setIfDisabled(OnlineIndexer.IndexingPolicy.DesiredAction.ERROR).setIfMismatchPrevious(OnlineIndexer.IndexingPolicy.DesiredAction.ERROR));
        builder.setUseSynchronizedSession(false);
    }
    try (OnlineIndexer indexBuilder = builder.build()) {
        CompletableFuture<Void> buildFuture;
        LOGGER.info(KeyValueLogMessage.of("building index", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.AGENT, agents, LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(), TestLogMessageKeys.OVERLAP, overlap));
        if (agents == 1) {
            buildFuture = indexBuilder.buildIndexAsync(false);
        } else {
            if (overlap) {
                CompletableFuture<?>[] futures = new CompletableFuture<?>[agents];
                for (int i = 0; i < agents; i++) {
                    final int agent = i;
                    futures[i] = safeBuild ? indexBuilder.buildIndexAsync(false).exceptionally(exception -> {
                        // A session-lock failure just means another agent got there first,
                        // because the other one is already working on building the index.
                        if (exception.getCause() instanceof SynchronizedSessionLockedException) {
                            LOGGER.info(KeyValueLogMessage.of("Detected another worker processing this index", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.AGENT, agent), exception);
                            return null;
                        } else {
                            throw new CompletionException(exception);
                        }
                    }) : indexBuilder.buildIndexAsync(false);
                }
                buildFuture = CompletableFuture.allOf(futures);
            } else {
                // Safe builds do not support building ranges yet.
                assumeFalse(safeBuild);
                buildFuture = indexBuilder.buildEndpoints().thenCompose(tupleRange -> {
                    if (tupleRange != null) {
                        // Partition [start, end) evenly among the agents and build each slice.
                        long start = tupleRange.getLow().getLong(0);
                        long end = tupleRange.getHigh().getLong(0);
                        CompletableFuture<?>[] futures = new CompletableFuture<?>[agents];
                        for (int i = 0; i < agents; i++) {
                            long itrStart = start + (end - start) / agents * i;
                            long itrEnd = (i == agents - 1) ? end : start + (end - start) / agents * (i + 1);
                            LOGGER.info(KeyValueLogMessage.of("building range", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.AGENT, i, TestLogMessageKeys.BEGIN, itrStart, TestLogMessageKeys.END, itrEnd));
                            futures[i] = indexBuilder.buildRange(Key.Evaluated.scalar(itrStart), Key.Evaluated.scalar(itrEnd));
                        }
                        return CompletableFuture.allOf(futures);
                    } else {
                        return AsyncUtil.DONE;
                    }
                });
            }
        }
        if (safeBuild) {
            // After the build settles (success or failure), no ongoing online build should remain registered.
            buildFuture = MoreAsyncUtil.composeWhenComplete(buildFuture, (result, ex) -> indexBuilder.checkAnyOngoingOnlineIndexBuildsAsync().thenAccept(Assertions::assertFalse), fdb::mapAsyncToSyncException);
        }
        if (recordsWhileBuilding != null && !recordsWhileBuilding.isEmpty()) {
            // Save the concurrent records in batches of 30 while the build future is outstanding.
            int i = 0;
            while (i < recordsWhileBuilding.size()) {
                List<TestRecords1Proto.MySimpleRecord> thisBatch = recordsWhileBuilding.subList(i, Math.min(i + 30, recordsWhileBuilding.size()));
                fdb.run(context -> {
                    FDBRecordStore store = recordStore.asBuilder().setContext(context).build();
                    thisBatch.forEach(store::saveRecord);
                    return null;
                });
                i += 30;
            }
        }
        buildFuture.join();
        // if a record is added to a range that has already been built, it will not be counted, otherwise,
        // it will.
        long additionalScans = 0;
        if (recordsWhileBuilding != null && !recordsWhileBuilding.isEmpty()) {
            additionalScans += (long) recordsWhileBuilding.size();
        }
        try (FDBRecordContext context = openContext()) {
            IndexBuildState indexBuildState = context.asyncToSync(FDBStoreTimer.Waits.WAIT_GET_INDEX_BUILD_STATE, IndexBuildState.loadIndexBuildStateAsync(recordStore, index));
            IndexState indexState = indexBuildState.getIndexState();
            if (isAlwaysReadable) {
                assertEquals(IndexState.READABLE, indexState);
            } else {
                assertEquals(IndexState.WRITE_ONLY, indexState);
                assertEquals(indexBuilder.getTotalRecordsScanned(), indexBuildState.getRecordsScanned());
                // Count index is not defined so we cannot determine the records in total from it.
                assertNull(indexBuildState.getRecordsInTotal());
            }
        }
        assertThat(indexBuilder.getTotalRecordsScanned(), allOf(greaterThanOrEqualTo((long) records.size()), lessThanOrEqualTo((long) records.size() + additionalScans)));
    }
    KeyValueLogMessage msg = KeyValueLogMessage.build("building index - completed", TestLogMessageKeys.INDEX, index);
    msg.addKeysAndValues(timer.getKeysAndValues());
    LOGGER.info(msg.toString());
    LOGGER.info(KeyValueLogMessage.of("running post build checks", TestLogMessageKeys.INDEX, index));
    // Skip afterBuild when the index was always readable, as it
    // uses the index in queries since the index is readable.
    if (!isAlwaysReadable) {
        afterBuild.run();
    }
    LOGGER.info(KeyValueLogMessage.of("verifying range set emptiness", TestLogMessageKeys.INDEX, index));
    try (FDBRecordContext context = openContext()) {
        RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(metaData.getIndex(index.getName())));
        // Log (not stdout) the built ranges, for debugging failing runs.
        LOGGER.info("Range set for " + records.size() + " records:\n" + rangeSet.rep(context.ensureActive()).join());
        if (!isAlwaysReadable) {
            assertEquals(Collections.emptyList(), rangeSet.missingRanges(context.ensureActive()).asList().join());
        }
        context.commit();
    }
    LOGGER.info(KeyValueLogMessage.of("marking index readable", TestLogMessageKeys.INDEX, index));
    try (FDBRecordContext context = openContext()) {
        // An always-readable index needs no transition, so markIndexReadable reports no update.
        boolean updated = recordStore.markIndexReadable(index).join();
        if (isAlwaysReadable) {
            assertFalse(updated);
        } else {
            assertTrue(updated);
        }
        context.commit();
    }
    afterReadable.run();
    LOGGER.info(KeyValueLogMessage.of("ending rebuild test", TestLogMessageKeys.RECORDS, records.size(), LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(), TestLogMessageKeys.AGENTS, agents, TestLogMessageKeys.OVERLAP, overlap, TestLogMessageKeys.SPLIT_LONG_RECORDS, splitLongRecords, TestLogMessageKeys.INDEX, index));
}
Use of com.apple.foundationdb.tuple.Tuple in the project fdb-record-layer by FoundationDB.
From the class OnlineIndexerUniqueIndexTest, method resolveUniquenessViolations:
/**
 * Builds a unique index over data containing duplicate {@code num_value_2} values, confirms the
 * online build fails with {@link RecordIndexUniquenessViolation}, then resolves every violation
 * in favor of the first conflicting primary key and finally marks the index readable.
 */
@Test
public void resolveUniquenessViolations() {
    // Ten records whose num_value_2 is recNo % 5, so each index key is shared by exactly two records.
    final List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 10)
            .mapToObj(recNo -> TestRecords1Proto.MySimpleRecord.newBuilder()
                    .setRecNo(recNo)
                    .setNumValue2((int) (recNo % 5))
                    .build())
            .collect(Collectors.toList());
    final Index index = new Index("simple$value_2", field("num_value_2"), EmptyKeyExpression.EMPTY, IndexTypes.VALUE, IndexOptions.UNIQUE_OPTIONS);
    final FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
    // Save the records before the index exists in the meta-data.
    openSimpleMetaData();
    try (FDBRecordContext context = openContext()) {
        for (TestRecords1Proto.MySimpleRecord record : records) {
            recordStore.saveRecord(record);
        }
        context.commit();
    }
    // Add the index and put it into write-only mode so it can be built online.
    openSimpleMetaData(hook);
    try (FDBRecordContext context = openContext()) {
        recordStore.markIndexWriteOnly(index).join();
        context.commit();
    }
    // The online build must fail: the existing data violates the uniqueness constraint.
    try (OnlineIndexer indexBuilder = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
        indexBuilder.buildIndexAsync().handle((ignore, err) -> {
            assertNotNull(err);
            final RuntimeException wrapped = FDBExceptions.wrapException(err);
            assertNotNull(wrapped);
            assertThat(wrapped, instanceOf(RecordIndexUniquenessViolation.class));
            return null;
        }).join();
    }
    try (FDBRecordContext context = openContext()) {
        // Each violated index key should list exactly two conflicting primary keys; keep the first.
        final Set<Tuple> violatedKeys = new HashSet<>(recordStore.scanUniquenessViolations(index)
                .map(violation -> violation.getIndexEntry().getKey())
                .asList()
                .join());
        for (Tuple violatedKey : violatedKeys) {
            final List<Tuple> conflictingPrimaryKeys = recordStore.scanUniquenessViolations(index, violatedKey)
                    .map(RecordIndexUniquenessViolation::getPrimaryKey)
                    .asList()
                    .join();
            assertEquals(2, conflictingPrimaryKeys.size());
            recordStore.resolveUniquenessViolation(index, violatedKey, conflictingPrimaryKeys.get(0)).join();
            assertEquals(0, (int) recordStore.scanUniquenessViolations(index, violatedKey).getCount().join());
        }
        // Records 0-4 survive resolution; their duplicates 5-9 have been removed.
        for (int recNo = 0; recNo < 5; recNo++) {
            assertNotNull(recordStore.loadRecord(Tuple.from(recNo)));
        }
        for (int recNo = 5; recNo < records.size(); recNo++) {
            assertNull(recordStore.loadRecord(Tuple.from(recNo)));
        }
        recordStore.markIndexReadable(index).join();
        context.commit();
    }
}
Use of com.apple.foundationdb.tuple.Tuple in the project fdb-record-layer by FoundationDB.
From the class SplitHelperTest, method saveSuccessfully:
/**
 * Saves {@code serialized} under {@code key} using {@code SplitHelper.saveWithSplit} and then
 * verifies both the returned {@code SplitHelper.SizeInfo} accounting (key/value counts and
 * sizes) and the actual key-value layout written under the record's subspace.
 *
 * @param context           transaction to write within
 * @param key               primary key tuple for the record
 * @param serialized        serialized record bytes to store
 * @param version           record version to store inline, or {@code null} for none
 * @param splitLongRecords  whether long records may be split across multiple keys
 * @param omitUnsplitSuffix whether unsplit records are stored without a split-point suffix
 * @param previousSizeInfo  size information from a previous save of this key, or {@code null}
 * @return the populated size info for the save that was performed
 */
private SplitHelper.SizeInfo saveSuccessfully(@Nonnull FDBRecordContext context, @Nonnull Tuple key, byte[] serialized, @Nullable FDBRecordVersion version, boolean splitLongRecords, boolean omitUnsplitSuffix, @Nullable FDBStoredSizes previousSizeInfo) {
final SplitHelper.SizeInfo sizeInfo = new SplitHelper.SizeInfo();
SplitHelper.saveWithSplit(context, subspace, key, serialized, version, splitLongRecords, omitUnsplitSuffix, previousSizeInfo != null, previousSizeInfo, sizeInfo);
// Number of data keys needed: ceil(serialized.length / SPLIT_RECORD_SIZE).
int dataKeyCount = (serialized.length - 1) / SplitHelper.SPLIT_RECORD_SIZE + 1;
boolean isSplit = dataKeyCount > 1;
int keyCount = dataKeyCount;
if (version != null) {
// The inline version occupies one extra key.
keyCount += 1;
}
// Every written key repeats the subspace prefix plus the packed primary key.
int keySize = (subspace.pack().length + key.pack().length) * keyCount;
assertEquals(isSplit, sizeInfo.isSplit());
assertEquals(keyCount, sizeInfo.getKeyCount());
if (!omitUnsplitSuffix || splitLongRecords) {
// Add in the counters the split points.
if (!isSplit) {
// As 0 requires 1 byte when Tuple packed
keySize += 1;
} else {
// As each split point is two bytes when tuple packed
keySize += dataKeyCount * 2;
}
}
if (version != null) {
// The record-version key carries its own suffix (two bytes when tuple-packed).
keySize += 2;
}
// The version value is tuple-packed: one type byte plus the versionstamp bytes.
int valueSize = serialized.length + (version != null ? 1 + FDBRecordVersion.VERSION_LENGTH : 0);
assertEquals(keySize, sizeInfo.getKeySize());
assertEquals(valueSize, sizeInfo.getValueSize());
assertEquals(version != null, sizeInfo.isVersionedInline());
// Scan the raw key-values under the record's subspace and reassemble what was stored.
final Subspace keySubspace = subspace.subspace(key);
RecordCursorIterator<KeyValue> kvCursor = KeyValueCursor.Builder.withSubspace(keySubspace).setContext(context).setScanProperties(ScanProperties.FORWARD_SCAN).build().asIterator();
List<Long> indexes = new ArrayList<>(keyCount);
byte[] versionBytes = null;
byte[] valueBytes = null;
while (kvCursor.hasNext()) {
KeyValue kv = kvCursor.next();
Tuple suffix = keySubspace.unpack(kv.getKey());
if (omitUnsplitSuffix) {
// With no suffix there is exactly one key, holding the whole record.
assertThat(suffix.isEmpty(), is(true));
valueBytes = kv.getValue();
} else {
// The suffix identifies the split point (or the record-version key).
Long index = suffix.getLong(0);
indexes.add(index);
if (index == SplitHelper.RECORD_VERSION) {
versionBytes = kv.getValue();
} else {
// Concatenate data splits in scan order to reconstruct the record bytes.
if (valueBytes == null) {
valueBytes = kv.getValue();
} else {
valueBytes = ByteArrayUtil.join(valueBytes, kv.getValue());
}
}
}
}
// Compute the split-point suffixes we expect to have seen, in order.
List<Long> expectedIndexes;
if (omitUnsplitSuffix) {
expectedIndexes = Collections.emptyList();
} else {
expectedIndexes = new ArrayList<>(keyCount);
if (version != null && version.isComplete()) {
expectedIndexes.add(SplitHelper.RECORD_VERSION);
}
if (!isSplit) {
expectedIndexes.add(SplitHelper.UNSPLIT_RECORD);
} else {
LongStream.range(SplitHelper.START_SPLIT_RECORD, SplitHelper.START_SPLIT_RECORD + dataKeyCount).forEach(expectedIndexes::add);
}
}
assertEquals(expectedIndexes, indexes);
assertNotNull(valueBytes);
assertArrayEquals(serialized, valueBytes);
if (version != null) {
// An incomplete version cannot be written until commit, so no version key should exist yet.
if (!version.isComplete()) {
assertNull(versionBytes);
} else {
assertNotNull(versionBytes);
assertEquals(version, FDBRecordVersion.fromVersionstamp(Tuple.fromBytes(versionBytes).getVersionstamp(0)));
}
} else {
assertNull(versionBytes);
}
return sizeInfo;
}
Use of com.apple.foundationdb.tuple.Tuple in the project fdb-record-layer by FoundationDB.
From the class OnlineIndexScrubberTest, method testScrubberSimpleMissing:
/**
 * Exercises {@code OnlineIndexScrubber.scrubMissingIndexEntries}: a freshly built index reports
 * no missing entries; after a few index keys are manually cleared, the scrubber reports (and,
 * with the default policy, repairs) exactly those entries; a final scrub finds nothing.
 */
@Test
void testScrubberSimpleMissing() throws ExecutionException, InterruptedException {
    final FDBStoreTimer timer = new FDBStoreTimer();
    final long numRecords = 50;
    long res;
    Index tgtIndex = new Index("tgt_index", field("num_value_2"), EmptyKeyExpression.EMPTY, IndexTypes.VALUE, IndexOptions.UNIQUE_OPTIONS);
    FDBRecordStoreTestBase.RecordMetaDataHook hook = myHook(tgtIndex);
    openSimpleMetaData();
    populateData(numRecords);
    openSimpleMetaData(hook);
    buildIndex(tgtIndex);
    // A freshly built index should have no missing entries (repair disabled so nothing is touched).
    res = scrubMissingEntries(tgtIndex, timer,
            OnlineIndexScrubber.ScrubbingPolicy.newBuilder().setLogWarningsLimit(Integer.MAX_VALUE).setAllowRepair(false).build());
    assertEquals(numRecords, timer.getCount(FDBStoreTimer.Counts.ONLINE_INDEX_BUILDER_RECORDS_SCANNED));
    assertEquals(0, timer.getCount(FDBStoreTimer.Counts.ONLINE_INDEX_BUILDER_RECORDS_INDEXED));
    assertEquals(0, timer.getCount(FDBStoreTimer.Counts.INDEX_SCRUBBER_MISSING_ENTRIES));
    assertEquals(0, res);
    // manually delete a few index entries
    openSimpleMetaData(hook);
    int missingCount = 0;
    try (FDBRecordContext context = openContext()) {
        List<IndexEntry> indexEntries = recordStore.scanIndex(tgtIndex, IndexScanType.BY_VALUE, TupleRange.ALL, null, ScanProperties.FORWARD_SCAN).asList().get();
        for (int i = 3; i < numRecords; i *= 2) {
            final IndexEntry indexEntry = indexEntries.get(i);
            final Tuple valueKey = indexEntry.getKey();
            final byte[] keyBytes = recordStore.indexSubspace(tgtIndex).pack(valueKey);
            recordStore.getContext().ensureActive().clear(keyBytes);
            missingCount++;
        }
        context.commit();
    }
    // verify the missing entries are found and fixed
    timer.reset();
    res = scrubMissingEntries(tgtIndex, timer, OnlineIndexScrubber.ScrubbingPolicy.newBuilder().build());
    assertEquals(numRecords, timer.getCount(FDBStoreTimer.Counts.ONLINE_INDEX_BUILDER_RECORDS_SCANNED));
    assertEquals(missingCount, timer.getCount(FDBStoreTimer.Counts.ONLINE_INDEX_BUILDER_RECORDS_INDEXED));
    assertEquals(missingCount, timer.getCount(FDBStoreTimer.Counts.INDEX_SCRUBBER_MISSING_ENTRIES));
    assertEquals(missingCount, res);
    // now verify it's fixed
    timer.reset();
    res = scrubMissingEntries(tgtIndex, timer, OnlineIndexScrubber.ScrubbingPolicy.newBuilder().build());
    assertEquals(numRecords, timer.getCount(FDBStoreTimer.Counts.ONLINE_INDEX_BUILDER_RECORDS_SCANNED));
    assertEquals(0, timer.getCount(FDBStoreTimer.Counts.ONLINE_INDEX_BUILDER_RECORDS_INDEXED));
    assertEquals(0, timer.getCount(FDBStoreTimer.Counts.INDEX_SCRUBBER_MISSING_ENTRIES));
    assertEquals(0, res);
}

/**
 * Runs a missing-entries scrub of {@code index} with the given policy, closing the scrubber
 * afterwards, and returns the number of missing entries the scrub reported.
 */
private long scrubMissingEntries(Index index, FDBStoreTimer timer, OnlineIndexScrubber.ScrubbingPolicy policy) {
    try (OnlineIndexScrubber indexScrubber = OnlineIndexScrubber.newBuilder()
            .setDatabase(fdb)
            .setMetaData(metaData)
            .setIndex(index)
            .setSubspace(subspace)
            .setScrubbingPolicy(policy)
            .setTimer(timer)
            .build()) {
        return indexScrubber.scrubMissingIndexEntries();
    }
}
Aggregations