Use of com.apple.foundationdb.async.RangeSet in the project fdb-record-layer by FoundationDB.
From the class OnlineIndexerSimpleTest, method testMarkReadableClearsBuiltRanges.
/**
 * Verify that marking an index readable clears its built-range bookkeeping: after
 * {@code buildIndex(true)} completes (which marks the index readable), the index's
 * {@link RangeSet} should be empty, so the whole key space reads back as missing.
 */
@Test
public void testMarkReadableClearsBuiltRanges() {
    // Insert 200 records so the index build has real work to do.
    List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 200).mapToObj(val ->
            TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(val).setNumValue2((int) val + 1).build()
    ).collect(Collectors.toList());
    Index index = new Index("newIndex", field("num_value_2").ungrouped(), IndexTypes.SUM);
    FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
    openSimpleMetaData();
    try (FDBRecordContext context = openContext()) {
        records.forEach(recordStore::saveRecord);
        context.commit();
    }
    openSimpleMetaData(hook);
    // Build the index; the "true" argument marks it readable on success.
    try (OnlineIndexer indexer = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
        indexer.buildIndex(true);
    }
    openSimpleMetaData(hook);
    try (FDBRecordContext context = openContext()) {
        // Verify the range set was cleared when the index was marked readable: a cleared
        // range set reports the entire key space as a single missing range.
        final RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(index));
        AsyncIterator<Range> ranges = rangeSet.missingRanges(recordStore.ensureContextActive()).iterator();
        final Range range = ranges.next();
        final boolean entireRangeMissing = RangeSet.isFirstKey(range.begin) && RangeSet.isFinalKey(range.end);
        assertTrue(entireRangeMissing);
        // Nothing was modified in this transaction; commit only to close it out cleanly.
        context.commit();
    }
}
Use of com.apple.foundationdb.async.RangeSet in the project fdb-record-layer by FoundationDB.
From the class OnlineIndexerSimpleTest, method buildEndpointIdempotency.
/**
 * Verify that {@link OnlineIndexer#buildEndpoints()} is idempotent: running it a second time
 * (with unchanged data, or after the endpoint records are deleted and re-added) leaves the
 * index range set and the SUM aggregate value in exactly the same state.
 */
@Test
public void buildEndpointIdempotency() {
    List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 10).mapToObj(val ->
            TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(val).setNumValue2((int) val + 1).build()
    ).collect(Collectors.toList());
    Index index = new Index("simple$value_2", field("num_value_2").ungrouped(), IndexTypes.SUM);
    IndexAggregateFunction aggregateFunction = new IndexAggregateFunction(FunctionNames.SUM, index.getRootExpression(), index.getName());
    List<String> indexTypes = Collections.singletonList("MySimpleRecord");
    FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
    // Evaluates the SUM aggregate in a transaction that is never committed, so temporarily
    // marking the index readable for the read does not leak out of the supplier.
    final Supplier<Tuple> getAggregate = () -> {
        Tuple ret;
        try (FDBRecordContext context = openContext()) {
            assertTrue(recordStore.uncheckedMarkIndexReadable(index.getName()).join());
            FDBRecordStore recordStore2 = recordStore.asBuilder().setContext(context).uncheckedOpen();
            ret = recordStore2.evaluateAggregateFunction(indexTypes, aggregateFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).join();
            // Do NOT commit the change.
        }
        return ret;
    };
    openSimpleMetaData();
    try (FDBRecordContext context = openContext()) {
        records.forEach(recordStore::saveRecord);
        context.commit();
    }
    openSimpleMetaData(hook);
    try (FDBRecordContext context = openContext()) {
        recordStore.markIndexWriteOnly(index).join();
        context.commit();
    }
    try (OnlineIndexer indexBuilder = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
        final RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(index));
        // Build the endpoints.
        TupleRange range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(10L), getAggregate.get());
        assertOnlyMiddleRangeMissing(rangeSet, Tuple.from(0L), Tuple.from(9L));
        // Make sure running this again doesn't change anything.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(10L), getAggregate.get());
        assertOnlyMiddleRangeMissing(rangeSet, Tuple.from(0L), Tuple.from(9L));
        // Remove the first and last records.
        try (FDBRecordContext context = openContext()) {
            recordStore.deleteRecord(Tuple.from(0L));
            recordStore.deleteRecord(Tuple.from(9L));
            context.commit();
        }
        assertEquals(Tuple.from(0L), getAggregate.get());
        // Rerun endpoints with new data.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(1L), range.getLow());
        assertEquals(Tuple.from(8L), range.getHigh());
        assertEquals(Tuple.from(9L), getAggregate.get());
        assertOnlyMiddleRangeMissing(rangeSet, Tuple.from(1L), Tuple.from(8L));
        // Run it again to show that nothing has happened.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(1L), range.getLow());
        assertEquals(Tuple.from(8L), range.getHigh());
        assertEquals(Tuple.from(9L), getAggregate.get());
        assertOnlyMiddleRangeMissing(rangeSet, Tuple.from(1L), Tuple.from(8L));
        // Add back the previous first and last records.
        try (FDBRecordContext context = openContext()) {
            recordStore.saveRecord(TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(0L).setNumValue2(1).build());
            recordStore.saveRecord(TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(9L).setNumValue2(10).build());
            context.commit();
        }
        assertEquals(Tuple.from(20L), getAggregate.get());
        // Rerun endpoints with new data. The ranges up to 1 and from 8 on were already
        // built above, so the missing middle range is still [1, 8) even though the
        // endpoint records are again 0 and 9.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(20L), getAggregate.get());
        assertOnlyMiddleRangeMissing(rangeSet, Tuple.from(1L), Tuple.from(8L));
        // Run it again to show that nothing has happened.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(20L), getAggregate.get());
        assertOnlyMiddleRangeMissing(rangeSet, Tuple.from(1L), Tuple.from(8L));
        // Straight up build the whole index.
        indexBuilder.buildIndex(false);
        assertEquals(Tuple.from(55L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database()).join());
    }
}

/**
 * Assert that only the key range from {@code low} (inclusive) to {@code high} (exclusive)
 * is still missing from the given index range set, i.e. that both endpoint ranges have
 * been built and nothing else has.
 */
private void assertOnlyMiddleRangeMissing(RangeSet rangeSet, Tuple low, Tuple high) {
    assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, low.pack()).join());
    assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), high.pack(), null).join());
    List<Range> middleRanges = rangeSet.missingRanges(fdb.database()).join();
    assertEquals(Collections.singletonList(low), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
    assertEquals(Collections.singletonList(high), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
}
Use of com.apple.foundationdb.async.RangeSet in the project fdb-record-layer by FoundationDB.
From the class OnlineIndexerBuildIndexTest, method singleRebuild.
/**
 * Common driver for the index rebuild tests: inserts {@code records}, adds {@code index} to the
 * meta-data, builds it with an {@link OnlineIndexer} (optionally with several agents building in
 * parallel and/or records being saved while the build runs), then checks the scanned-record
 * counts, verifies the index range set is fully built, and finally marks the index readable.
 *
 * @param records records that exist before the build begins
 * @param recordsWhileBuilding records saved concurrently with the build, or {@code null} for none
 * @param agents number of concurrent build agents to simulate
 * @param overlap if {@code true}, all agents build the whole range concurrently; otherwise the
 *        endpoint range is partitioned evenly between the agents
 * @param splitLongRecords whether the store should be configured to split long records
 * @param index the index to build
 * @param beforeBuild hook run after the records are inserted but before the index is added
 * @param afterBuild hook run after the build completes, while the index is still write-only
 * @param afterReadable hook run after the index has been marked readable
 */
void singleRebuild(@Nonnull List<TestRecords1Proto.MySimpleRecord> records, @Nullable List<TestRecords1Proto.MySimpleRecord> recordsWhileBuilding, int agents, boolean overlap, boolean splitLongRecords, @Nonnull Index index, @Nonnull Runnable beforeBuild, @Nonnull Runnable afterBuild, @Nonnull Runnable afterReadable) {
LOGGER.info(KeyValueLogMessage.of("beginning rebuild test", TestLogMessageKeys.RECORDS, records.size(), LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(), TestLogMessageKeys.AGENTS, agents, TestLogMessageKeys.OVERLAP, overlap, TestLogMessageKeys.SPLIT_LONG_RECORDS, splitLongRecords, TestLogMessageKeys.INDEX, index));
final FDBStoreTimer timer = new FDBStoreTimer();
// Meta-data hook that only configures record splitting, used for the pre-build insert phase.
final FDBRecordStoreTestBase.RecordMetaDataHook onlySplitHook = metaDataBuilder -> {
if (splitLongRecords) {
metaDataBuilder.setSplitLongRecords(true);
metaDataBuilder.removeIndex("MySimpleRecord$str_value_indexed");
}
};
// Full hook: splitting configuration plus the index under test.
final FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> {
onlySplitHook.apply(metaDataBuilder);
metaDataBuilder.addIndex("MySimpleRecord", index);
};
LOGGER.info(KeyValueLogMessage.of("inserting elements prior to test", TestLogMessageKeys.RECORDS, records.size()));
openSimpleMetaData(onlySplitHook);
try (FDBRecordContext context = openContext()) {
for (TestRecords1Proto.MySimpleRecord record : records) {
// Check presence first to avoid overwriting version information of previously added records.
Tuple primaryKey = Tuple.from(record.getRecNo());
if (recordStore.loadRecord(primaryKey) == null) {
recordStore.saveRecord(record);
}
}
context.commit();
}
LOGGER.info(KeyValueLogMessage.of("running before build for test"));
beforeBuild.run();
openSimpleMetaData(hook);
LOGGER.info(KeyValueLogMessage.of("adding index", TestLogMessageKeys.INDEX, index));
// NOTE(review): openSimpleMetaData(hook) was already called two lines above; this second
// call looks redundant — confirm it is intentional.
openSimpleMetaData(hook);
final boolean isAlwaysReadable;
try (FDBRecordContext context = openContext()) {
// When safeBuild is set, transitioning the index to write-only is presumably taken
// care of by OnlineIndexer.
if (!safeBuild) {
LOGGER.info(KeyValueLogMessage.of("marking write-only", TestLogMessageKeys.INDEX, index));
recordStore.clearAndMarkIndexWriteOnly(index).join();
}
isAlwaysReadable = safeBuild && recordStore.isIndexReadable(index);
context.commit();
}
LOGGER.info(KeyValueLogMessage.of("creating online index builder", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.RECORD_TYPES, metaData.recordTypesForIndex(index), LogMessageKeys.SUBSPACE, ByteArrayUtil2.loggable(subspace.pack()), LogMessageKeys.LIMIT, 20, TestLogMessageKeys.RECORDS_PER_SECOND, OnlineIndexer.DEFAULT_RECORDS_PER_SECOND * 100));
final OnlineIndexer.Builder builder = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).setConfigLoader(old -> {
OnlineIndexer.Config.Builder conf = OnlineIndexer.Config.newBuilder().setMaxLimit(20).setMaxRetries(Integer.MAX_VALUE).setRecordsPerSecond(OnlineIndexer.DEFAULT_RECORDS_PER_SECOND * 100);
if (ThreadLocalRandom.current().nextBoolean()) {
// randomly enable the progress logging to ensure that it doesn't throw exceptions,
// or otherwise disrupt the build.
LOGGER.info("Setting progress log interval");
conf.setProgressLogIntervalMillis(0);
}
return conf.build();
}).setTimer(timer);
if (ThreadLocalRandom.current().nextBoolean()) {
LOGGER.info("Setting priority to DEFAULT");
builder.setPriority(FDBTransactionPriority.DEFAULT);
}
if (fdb.isTrackLastSeenVersion()) {
LOGGER.info("Setting weak read semantics");
builder.setWeakReadSemantics(new FDBDatabase.WeakReadSemantics(0L, Long.MAX_VALUE, true));
}
if (!safeBuild) {
builder.setIndexingPolicy(OnlineIndexer.IndexingPolicy.newBuilder().setIfDisabled(OnlineIndexer.IndexingPolicy.DesiredAction.ERROR).setIfMismatchPrevious(OnlineIndexer.IndexingPolicy.DesiredAction.ERROR));
builder.setUseSynchronizedSession(false);
}
try (OnlineIndexer indexBuilder = builder.build()) {
CompletableFuture<Void> buildFuture;
LOGGER.info(KeyValueLogMessage.of("building index", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.AGENT, agents, LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(), TestLogMessageKeys.OVERLAP, overlap));
if (agents == 1) {
buildFuture = indexBuilder.buildIndexAsync(false);
} else {
if (overlap) {
// All agents race to build the full index concurrently.
CompletableFuture<?>[] futures = new CompletableFuture<?>[agents];
for (int i = 0; i < agents; i++) {
final int agent = i;
futures[i] = safeBuild ? indexBuilder.buildIndexAsync(false).exceptionally(exception -> {
// A lock exception is tolerated here: it means this agent lost the race
// because the other one is already working on building the index.
if (exception.getCause() instanceof SynchronizedSessionLockedException) {
LOGGER.info(KeyValueLogMessage.of("Detected another worker processing this index", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.AGENT, agent), exception);
return null;
} else {
throw new CompletionException(exception);
}
}) : indexBuilder.buildIndexAsync(false);
}
buildFuture = CompletableFuture.allOf(futures);
} else {
// Safe builds do not support building ranges yet.
assumeFalse(safeBuild);
// Partition the endpoint range evenly and give each agent one slice.
buildFuture = indexBuilder.buildEndpoints().thenCompose(tupleRange -> {
if (tupleRange != null) {
long start = tupleRange.getLow().getLong(0);
long end = tupleRange.getHigh().getLong(0);
CompletableFuture<?>[] futures = new CompletableFuture<?>[agents];
for (int i = 0; i < agents; i++) {
long itrStart = start + (end - start) / agents * i;
long itrEnd = (i == agents - 1) ? end : start + (end - start) / agents * (i + 1);
LOGGER.info(KeyValueLogMessage.of("building range", TestLogMessageKeys.INDEX, index, TestLogMessageKeys.AGENT, i, TestLogMessageKeys.BEGIN, itrStart, TestLogMessageKeys.END, itrEnd));
futures[i] = indexBuilder.buildRange(Key.Evaluated.scalar(itrStart), Key.Evaluated.scalar(itrEnd));
}
return CompletableFuture.allOf(futures);
} else {
return AsyncUtil.DONE;
}
});
}
}
// For safe builds, additionally verify that no online index build remains ongoing
// once the build future completes.
if (safeBuild) {
buildFuture = MoreAsyncUtil.composeWhenComplete(buildFuture, (result, ex) -> indexBuilder.checkAnyOngoingOnlineIndexBuildsAsync().thenAccept(Assertions::assertFalse), fdb::mapAsyncToSyncException);
}
// Save the concurrent records in batches of 30 while the build future is outstanding.
if (recordsWhileBuilding != null && recordsWhileBuilding.size() > 0) {
int i = 0;
while (i < recordsWhileBuilding.size()) {
List<TestRecords1Proto.MySimpleRecord> thisBatch = recordsWhileBuilding.subList(i, Math.min(i + 30, recordsWhileBuilding.size()));
fdb.run(context -> {
FDBRecordStore store = recordStore.asBuilder().setContext(context).build();
thisBatch.forEach(store::saveRecord);
return null;
});
i += 30;
}
}
buildFuture.join();
// if a record is added to a range that has already been built, it will not be counted, otherwise,
// it will.
long additionalScans = 0;
if (recordsWhileBuilding != null && recordsWhileBuilding.size() > 0) {
additionalScans += (long) recordsWhileBuilding.size();
}
// Inspect the build state recorded in the store and check the scanned-record accounting.
try (FDBRecordContext context = openContext()) {
IndexBuildState indexBuildState = context.asyncToSync(FDBStoreTimer.Waits.WAIT_GET_INDEX_BUILD_STATE, IndexBuildState.loadIndexBuildStateAsync(recordStore, index));
IndexState indexState = indexBuildState.getIndexState();
if (isAlwaysReadable) {
assertEquals(IndexState.READABLE, indexState);
} else {
assertEquals(IndexState.WRITE_ONLY, indexState);
assertEquals(indexBuilder.getTotalRecordsScanned(), indexBuildState.getRecordsScanned());
// Count index is not defined so we cannot determine the records in total from it.
assertNull(indexBuildState.getRecordsInTotal());
}
}
assertThat(indexBuilder.getTotalRecordsScanned(), allOf(greaterThanOrEqualTo((long) records.size()), lessThanOrEqualTo((long) records.size() + additionalScans)));
}
KeyValueLogMessage msg = KeyValueLogMessage.build("building index - completed", TestLogMessageKeys.INDEX, index);
msg.addKeysAndValues(timer.getKeysAndValues());
LOGGER.info(msg.toString());
LOGGER.info(KeyValueLogMessage.of("running post build checks", TestLogMessageKeys.INDEX, index));
// Skip the afterBuild checks when the index was already readable — they presumably assume a
// write-only index, whereas here the planner already uses the index in queries since the index is readable.
if (!isAlwaysReadable) {
afterBuild.run();
}
LOGGER.info(KeyValueLogMessage.of("verifying range set emptiness", TestLogMessageKeys.INDEX, index));
try (FDBRecordContext context = openContext()) {
RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(metaData.getIndex(index.getName())));
// NOTE(review): prefer LOGGER over System.out for this diagnostic output.
System.out.println("Range set for " + records.size() + " records:\n" + rangeSet.rep(context.ensureActive()).join());
if (!isAlwaysReadable) {
assertEquals(Collections.emptyList(), rangeSet.missingRanges(context.ensureActive()).asList().join());
}
context.commit();
}
LOGGER.info(KeyValueLogMessage.of("marking index readable", TestLogMessageKeys.INDEX, index));
try (FDBRecordContext context = openContext()) {
boolean updated = recordStore.markIndexReadable(index).join();
if (isAlwaysReadable) {
// Already readable, so marking it readable again reports no change.
assertFalse(updated);
} else {
assertTrue(updated);
}
context.commit();
}
afterReadable.run();
LOGGER.info(KeyValueLogMessage.of("ending rebuild test", TestLogMessageKeys.RECORDS, records.size(), LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(), TestLogMessageKeys.AGENTS, agents, TestLogMessageKeys.OVERLAP, overlap, TestLogMessageKeys.SPLIT_LONG_RECORDS, splitLongRecords, TestLogMessageKeys.INDEX, index));
}
Use of com.apple.foundationdb.async.RangeSet in the project fdb-record-layer by FoundationDB.
From the class FDBRestrictedIndexQueryTest, method queryAggregateWithWriteOnly.
/**
 * Verify that write-only aggregate indexes are not used by the planner.
 * Verify that re-allowing reads to those indexes allows the planner to use them.
 * TODO: Abstract out common code in queryWithWriteOnly, queryWithDisabled, queryAggregateWithWriteOnly and queryAggregateWithDisabled (https://github.com/FoundationDB/fdb-record-layer/issues/4)
 */
@Test
void queryAggregateWithWriteOnly() throws Exception {
    final Index sumIndex = new Index("value3sum", field("num_value_3_indexed").ungrouped(), IndexTypes.SUM);
    final Index maxIndex = new Index("value3max", field("num_value_3_indexed").ungrouped(), IndexTypes.MAX_EVER_TUPLE);
    final RecordMetaDataHook hook = builder -> {
        builder.addIndex("MySimpleRecord", sumIndex);
        builder.addIndex("MySimpleRecord", maxIndex);
    };
    final List<String> recordTypes = Collections.singletonList("MySimpleRecord");
    final IndexAggregateFunction sumFunction = new IndexAggregateFunction(FunctionNames.SUM, sumIndex.getRootExpression(), sumIndex.getName());
    final IndexAggregateFunction maxFunction = new IndexAggregateFunction(FunctionNames.MAX_EVER, maxIndex.getRootExpression(), maxIndex.getName());
    try (FDBRecordContext context = openContext()) {
        openSimpleRecordStore(context, hook);
        recordStore.deleteAllRecords();
        // Put both aggregate indexes into the write-only state.
        recordStore.clearAndMarkIndexWriteOnly("value3sum").join();
        recordStore.clearAndMarkIndexWriteOnly("value3max").join();
        // Mark part of the sum index's key space as built so its range set is non-trivial.
        final RangeSet sumRangeSet = new RangeSet(recordStore.indexRangeSubspace(sumIndex));
        sumRangeSet.insertRange(context.ensureActive(), Tuple.from(1000).pack(), Tuple.from(1500).pack(), true).get();
        saveSimpleRecord(1066, 42);
        saveSimpleRecord(1776, 100);
        // While write-only, neither aggregate function may be evaluated.
        assertThrowsAggregateFunctionNotSupported(() -> recordStore.evaluateAggregateFunction(recordTypes, sumFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).get(), "value3sum.sum(Field { 'num_value_3_indexed' None} group 1)");
        assertThrowsAggregateFunctionNotSupported(() -> recordStore.evaluateAggregateFunction(recordTypes, maxFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).get(), "value3max.max_ever(Field { 'num_value_3_indexed' None} group 1)");
        commit(context);
    }
    try (FDBRecordContext context = openContext()) {
        openSimpleRecordStore(context, hook);
        recordStore.uncheckedMarkIndexReadable("value3sum").join();
        recordStore.uncheckedMarkIndexReadable("value3max").join();
        // Unsafe: made readable without building indexes, which is why sum gets wrong answer.
        assertEquals(42L, recordStore.evaluateAggregateFunction(recordTypes, sumFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).get().getLong(0));
        assertEquals(100L, recordStore.evaluateAggregateFunction(recordTypes, maxFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).get().getLong(0));
        recordStore.rebuildAllIndexes().get();
        // After a full rebuild, both aggregates reflect every saved record.
        assertEquals(142L, recordStore.evaluateAggregateFunction(recordTypes, sumFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).get().getLong(0));
        assertEquals(100L, recordStore.evaluateAggregateFunction(recordTypes, maxFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).get().getLong(0));
    }
}
Use of com.apple.foundationdb.async.RangeSet in the project fdb-record-layer by FoundationDB.
From the class FDBRecordStore, method firstUnbuiltRange.
/**
 * Returns the first unbuilt range of an index that is currently being built.
 * If every range has been built, the returned future completes with an empty
 * {@link Optional}; otherwise it completes with an {@link Optional} holding the
 * first missing range found in the index's {@link RangeSet}.
 * @param index the index to check built state
 * @return a future that will contain the first unbuilt range if any
 */
@Nonnull
public CompletableFuture<Optional<Range>> firstUnbuiltRange(@Nonnull Index index) {
    if (!getRecordMetaData().hasIndex(index.getName())) {
        throw new MetaDataException("Index " + index.getName() + " does not exist in meta-data.");
    }
    final Transaction tr = ensureContextActive();
    final RangeSet rangeSet = new RangeSet(indexRangeSubspace(index));
    // Ask for at most one missing range; we only need to know the first.
    final AsyncIterator<Range> missingRangeIterator = rangeSet.missingRanges(tr, null, null, 1).iterator();
    return missingRangeIterator.onHasNext().thenApply(hasNext ->
            hasNext ? Optional.of(missingRangeIterator.next()) : Optional.empty());
}
Aggregations