Use of com.apple.foundationdb.record.TupleRange in project fdb-record-layer by FoundationDB: class FDBRecordStoreBase, method scanIndexRecordsEqual.
/**
 * Scan the records pointed to by an index equal to indexed values.
 * @param indexName the name of the index
 * @param values a left-subset of values of indexed fields
 * @return a cursor that returns records pointed to by the index
 */
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecordsEqual(@Nonnull final String indexName, @Nonnull final Object... values) {
    final Tuple tuple = Tuple.from(values);
    final TupleRange range = TupleRange.allOf(tuple);
    return scanIndexRecords(indexName, IndexScanType.BY_VALUE, range, null, ScanProperties.FORWARD_SCAN);
}
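A minimal caller-side sketch of what this helper builds. The store variable, index name, and search value below are illustrative assumptions, not part of the Record Layer source; the point is that an equality lookup is just the prefix TupleRange that TupleRange.allOf constructs, passed to scanIndexRecords.

// Sketch only: the opened store and the "MySimpleRecord$str_value_indexed" index are assumed.
static RecordCursor<FDBIndexedRecord<Message>> scanEvenRecords(FDBRecordStore recordStore) {
    // Equivalent to recordStore.scanIndexRecordsEqual("MySimpleRecord$str_value_indexed", "even"):
    // the helper just wraps the supplied values in a prefix range.
    final TupleRange range = TupleRange.allOf(Tuple.from("even"));
    return recordStore.scanIndexRecords("MySimpleRecord$str_value_indexed",
            IndexScanType.BY_VALUE, range, null, ScanProperties.FORWARD_SCAN);
}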
Use of com.apple.foundationdb.record.TupleRange in project fdb-record-layer by FoundationDB: class TimeWindowLeaderboardIndexMaintainer, method scan.
@Nonnull
@Override
public RecordCursor<IndexEntry> scan(@Nonnull IndexScanBounds scanBounds, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
    final IndexScanType scanType = scanBounds.getScanType();
    if (scanType != IndexScanType.BY_VALUE && scanType != IndexScanType.BY_RANK && scanType != IndexScanType.BY_TIME_WINDOW) {
        throw new RecordCoreException("Can only scan leaderboard index by time window, rank or value.");
    }
    // Decode range arguments.
    final int type;
    final long timestamp;
    final TupleRange leaderboardRange;
    if (scanType == IndexScanType.BY_TIME_WINDOW) {
        // Get oldest leaderboard of type containing timestamp.
        if (scanBounds instanceof TimeWindowScanRange) {
            TimeWindowScanRange scanRange = (TimeWindowScanRange) scanBounds;
            type = scanRange.getLeaderboardType();
            timestamp = scanRange.getLeaderboardTimestamp();
            leaderboardRange = scanRange.getScanRange();
        } else {
            // TODO: For compatibility, accept scan with BY_TIME_WINDOW and TupleRange for a while.
            // This code can be removed when we are confident all callers have been converted.
            IndexScanRange scanRange = (IndexScanRange) scanBounds;
            TupleRange rankRange = scanRange.getScanRange();
            final Tuple lowRank = rankRange.getLow();
            final Tuple highRank = rankRange.getHigh();
            type = (int) lowRank.getLong(0);
            timestamp = lowRank.getLong(1);
            leaderboardRange = new TupleRange(
                    Tuple.fromList(lowRank.getItems().subList(2, lowRank.size())),
                    Tuple.fromList(highRank.getItems().subList(2, highRank.size())),
                    rankRange.getLowEndpoint(), rankRange.getHighEndpoint());
        }
    } else {
        // Get the all-time leaderboard for unqualified rank or value.
        IndexScanRange scanRange = (IndexScanRange) scanBounds;
        type = TimeWindowLeaderboard.ALL_TIME_LEADERBOARD_TYPE;
        // Any value would do.
        timestamp = 0;
        leaderboardRange = scanRange.getScanRange();
    }
    final int groupPrefixSize = getGroupingCount();
    final CompletableFuture<TimeWindowLeaderboard> leaderboardFuture = oldestLeaderboardMatching(type, timestamp);
    final CompletableFuture<TupleRange> scoreRangeFuture;
    if (scanType == IndexScanType.BY_VALUE) {
        scoreRangeFuture = leaderboardFuture.thenApply(leaderboard -> leaderboard == null ? null : leaderboardRange);
    } else {
        scoreRangeFuture = leaderboardFuture.thenCompose(leaderboard -> {
            if (leaderboard == null) {
                return CompletableFuture.completedFuture(null);
            }
            final Subspace extraSubspace = getSecondarySubspace();
            final Subspace leaderboardSubspace = extraSubspace.subspace(leaderboard.getSubspaceKey());
            final RankedSet.Config leaderboardConfig = config.toBuilder().setNLevels(leaderboard.getNLevels()).build();
            return RankedSetIndexHelper.rankRangeToScoreRange(state, groupPrefixSize, leaderboardSubspace, leaderboardConfig, leaderboardRange);
        });
    }
    // Add leaderboard's key to the front and take it off of the results.
    return RecordCursor.flatMapPipelined(ignore -> RecordCursor.fromFuture(getExecutor(), scoreRangeFuture), (scoreRange, ignore) -> {
        if (scoreRange == null) {
            return RecordCursor.empty(getExecutor());
        }
        // Already waited in scoreRangeFuture.
        final TimeWindowLeaderboard leaderboard = state.context.joinNow(leaderboardFuture);
        final CompletableFuture<Boolean> highStoreFirstFuture;
        if (scanType == IndexScanType.BY_VALUE) {
            final Tuple lowGroup = scoreRange.getLow() != null && scoreRange.getLow().size() > groupPrefixSize
                    ? TupleHelpers.subTuple(scoreRange.getLow(), 0, groupPrefixSize) : null;
            final Tuple highGroup = scoreRange.getHigh() != null && scoreRange.getHigh().size() > groupPrefixSize
                    ? TupleHelpers.subTuple(scoreRange.getHigh(), 0, groupPrefixSize) : null;
            if (lowGroup != null && lowGroup.equals(highGroup)) {
                highStoreFirstFuture = isHighScoreFirst(leaderboard.getDirectory(), lowGroup);
            } else {
                highStoreFirstFuture = CompletableFuture.completedFuture(leaderboard.getDirectory().isHighScoreFirst());
            }
        } else {
            highStoreFirstFuture = AsyncUtil.READY_FALSE;
        }
        if (highStoreFirstFuture.isDone()) {
            return scanLeaderboard(leaderboard, state.context.joinNow(highStoreFirstFuture), scoreRange, continuation, scanProperties);
        } else {
            return RecordCursor.flatMapPipelined(ignore2 -> RecordCursor.fromFuture(getExecutor(), highStoreFirstFuture),
                    (highScoreFirst, ignore2) -> scanLeaderboard(leaderboard, highScoreFirst, scoreRange, continuation, scanProperties), null, 1);
        }
    }, null, 1).mapPipelined(kv -> getIndexEntry(kv, groupPrefixSize, state.context.joinNow(leaderboardFuture).getDirectory()), 1);
}
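A minimal sketch of the legacy encoding that the compatibility branch above still decodes: the leaderboard type and timestamp occupy the first two positions of each bound, and the remaining elements are the rank bounds that get split back out. The type, timestamp, and rank values are illustrative assumptions.

// Hypothetical values; only the (type, timestamp, rank...) layout is taken from the branch above.
final int leaderboardType = 2;        // assumed time-window type
final long scanTimestamp = 10_100L;   // assumed timestamp inside the desired window
final TupleRange legacyTimeWindowRange = new TupleRange(
        Tuple.from(leaderboardType, scanTimestamp, 0L),    // low bound: rank 0
        Tuple.from(leaderboardType, scanTimestamp, 100L),  // high bound: rank 100
        EndpointType.RANGE_INCLUSIVE,
        EndpointType.RANGE_EXCLUSIVE);
// Stripping the first two elements, as the maintainer does, leaves the rank range [0, 100).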
Use of com.apple.foundationdb.record.TupleRange in project fdb-record-layer by FoundationDB: class FDBRecordStoreIndexTest, method scanIndexWithValue.
/**
 * Verify that explicit (i.e. bypassing the planner) index scans work.
 */
@Test
public void scanIndexWithValue() throws Exception {
    RecordMetaDataHook hook = metaData -> {
        metaData.removeIndex("MySimpleRecord$num_value_unique");
        metaData.addIndex("MySimpleRecord", new Index("multi_index_value",
                Key.Expressions.field("num_value_unique"), Key.Expressions.field("num_value_2"),
                IndexTypes.VALUE, IndexOptions.UNIQUE_OPTIONS));
    };
    complexQuerySetup(hook);
    try (FDBRecordContext context = openContext()) {
        openSimpleRecordStore(context, hook);
        int i = 0;
        try (RecordCursorIterator<IndexEntry> cursor = recordStore.scanIndex(
                recordStore.getRecordMetaData().getIndex("multi_index_value"), IndexScanType.BY_VALUE,
                new TupleRange(Tuple.from(900L), Tuple.from(950L), EndpointType.RANGE_INCLUSIVE, EndpointType.RANGE_INCLUSIVE),
                null, ScanProperties.FORWARD_SCAN).asIterator()) {
            while (cursor.hasNext()) {
                IndexEntry tuples = cursor.next();
                Tuple key = tuples.getKey();
                Tuple value = tuples.getValue();
                assertEquals(2, key.size());
                assertEquals(1, value.size());
                assertTrue(key.getLong(0) >= 900);
                assertTrue(key.getLong(0) <= 950);
                assertTrue(value.getLong(0) == (999 - i) % 3);
                i++;
            }
        }
        assertEquals(50, i);
        assertDiscardedNone(context);
    }
}
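For comparison, a minimal sketch of the same explicit scan narrowed to a single num_value_unique value. It reuses the test's store and index but is illustrative, not part of the test: an equality lookup is the prefix range that TupleRange.allOf builds, and each entry carries (num_value_unique, primary key) in its key and num_value_2 in its value, matching the size assertions above.

// Illustrative only: reuses the test's recordStore and "multi_index_value" index.
TupleRange exactly950 = TupleRange.allOf(Tuple.from(950L));
try (RecordCursorIterator<IndexEntry> cursor = recordStore.scanIndex(
        recordStore.getRecordMetaData().getIndex("multi_index_value"), IndexScanType.BY_VALUE,
        exactly950, null, ScanProperties.FORWARD_SCAN).asIterator()) {
    while (cursor.hasNext()) {
        IndexEntry entry = cursor.next();
        // Key is (num_value_unique, primary key); value is (num_value_2).
        assertEquals(950L, entry.getKey().getLong(0));
    }
}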
Use of com.apple.foundationdb.record.TupleRange in project fdb-record-layer by FoundationDB: class GeophileSpatialJoin, method getSpatialIndex.
@Nonnull
public SpatialIndex<GeophileRecordImpl> getSpatialIndex(@Nonnull String indexName, @Nonnull ScanComparisons prefixComparisons, @Nonnull BiFunction<IndexEntry, Tuple, GeophileRecordImpl> recordFunction) {
    if (!prefixComparisons.isEquality()) {
        throw new RecordCoreArgumentException("prefix comparisons must only have equality");
    }
    // TODO: Add a FDBRecordStoreBase.getIndexMaintainer String overload to do this.
    final IndexMaintainer indexMaintainer = store.getIndexMaintainer(store.getRecordMetaData().getIndex(indexName));
    final TupleRange prefixRange = prefixComparisons.toTupleRange(store, context);
    // Since this is an equality, will match getHigh(), too.
    final Tuple prefix = prefixRange.getLow();
    final Index<GeophileRecordImpl> index = new GeophileIndexImpl(indexMaintainer, prefix, recordFunction);
    final Space space = ((GeophileIndexMaintainer) indexMaintainer).getSpace();
    try {
        return SpatialIndex.newSpatialIndex(space, index);
    } catch (IOException ex) {
        throw new RecordCoreException("Unexpected IO exception", ex);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new RecordCoreException(ex);
    }
}
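A minimal sketch of the property the "will match getHigh(), too" comment relies on: a range built from equality-only comparisons pins both endpoints to the same tuple, so either endpoint can serve as the subspace prefix. The values are illustrative, and TupleRange.allOf stands in here for the equality range that toTupleRange would produce.

// Illustrative only: low and high coincide for an equality-style range.
final TupleRange equalityRange = TupleRange.allOf(Tuple.from("region", 42L));
final Tuple prefix = equalityRange.getLow();
assert prefix.equals(equalityRange.getHigh());   // both endpoints are ("region", 42)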
Use of com.apple.foundationdb.record.TupleRange in project fdb-record-layer by FoundationDB: class OnlineIndexerSimpleTest, method buildEndpointIdempotency.
@Test
public void buildEndpointIdempotency() {
    List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 10).mapToObj(val ->
            TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(val).setNumValue2((int) val + 1).build()
    ).collect(Collectors.toList());
    Index index = new Index("simple$value_2", field("num_value_2").ungrouped(), IndexTypes.SUM);
    IndexAggregateFunction aggregateFunction = new IndexAggregateFunction(FunctionNames.SUM, index.getRootExpression(), index.getName());
    List<String> indexTypes = Collections.singletonList("MySimpleRecord");
    FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
    final Supplier<Tuple> getAggregate = () -> {
        Tuple ret;
        try (FDBRecordContext context = openContext()) {
            assertTrue(recordStore.uncheckedMarkIndexReadable(index.getName()).join());
            FDBRecordStore recordStore2 = recordStore.asBuilder().setContext(context).uncheckedOpen();
            ret = recordStore2.evaluateAggregateFunction(indexTypes, aggregateFunction, TupleRange.ALL, IsolationLevel.SERIALIZABLE).join();
            // Do NOT commit the change.
        }
        return ret;
    };
    openSimpleMetaData();
    try (FDBRecordContext context = openContext()) {
        records.forEach(recordStore::saveRecord);
        context.commit();
    }
    openSimpleMetaData(hook);
    try (FDBRecordContext context = openContext()) {
        recordStore.markIndexWriteOnly(index).join();
        context.commit();
    }
    try (OnlineIndexer indexBuilder = OnlineIndexer.newBuilder()
            .setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
        final RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(index));
        // Build the endpoints
        TupleRange range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(10L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(0L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(9L).pack(), null).join());
        List<Range> middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(0L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(9L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Make sure running this again doesn't change anything.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(10L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(0L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(9L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(0L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(9L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Remove the first and last records.
        try (FDBRecordContext context = openContext()) {
            recordStore.deleteRecord(Tuple.from(0L));
            recordStore.deleteRecord(Tuple.from(9L));
            context.commit();
        }
        assertEquals(Tuple.from(0L), getAggregate.get());
        // Rerun endpoints with new data.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(1L), range.getLow());
        assertEquals(Tuple.from(8L), range.getHigh());
        assertEquals(Tuple.from(9L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Run it again to show that nothing has happened.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(1L), range.getLow());
        assertEquals(Tuple.from(8L), range.getHigh());
        assertEquals(Tuple.from(9L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Add back the previous first and last records.
        try (FDBRecordContext context = openContext()) {
            recordStore.saveRecord(TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(0L).setNumValue2(1).build());
            recordStore.saveRecord(TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(9L).setNumValue2(10).build());
            context.commit();
        }
        assertEquals(Tuple.from(20L), getAggregate.get());
        // Rerun endpoints with new data.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(20L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Run it again to show that nothing has happened.
        range = indexBuilder.buildEndpoints().join();
        assertEquals(Tuple.from(0L), range.getLow());
        assertEquals(Tuple.from(9L), range.getHigh());
        assertEquals(Tuple.from(20L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), null, Tuple.from(1L).pack()).join());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database(), Tuple.from(8L).pack(), null).join());
        middleRanges = rangeSet.missingRanges(fdb.database()).join();
        assertEquals(Collections.singletonList(Tuple.from(1L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.begin)).collect(Collectors.toList()));
        assertEquals(Collections.singletonList(Tuple.from(8L)), middleRanges.stream().map(r -> Tuple.fromBytes(r.end)).collect(Collectors.toList()));
        // Straight up build the whole index.
        indexBuilder.buildIndex(false);
        assertEquals(Tuple.from(55L), getAggregate.get());
        assertEquals(Collections.emptyList(), rangeSet.missingRanges(fdb.database()).join());
    }
}
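Taken together, the examples on this page construct a TupleRange in three ways; a minimal side-by-side sketch with illustrative bound values:

// Unbounded range, as passed to evaluateAggregateFunction above.
TupleRange everything = TupleRange.ALL;
// Prefix (equality) range, as built by scanIndexRecordsEqual above.
TupleRange prefix = TupleRange.allOf(Tuple.from("even"));
// Explicit bounds and endpoint types, as in scanIndexWithValue above.
TupleRange bounded = new TupleRange(Tuple.from(900L), Tuple.from(950L),
        EndpointType.RANGE_INCLUSIVE, EndpointType.RANGE_INCLUSIVE);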