Use of com.apple.foundationdb.record.RecordCursor in project fdb-record-layer by FoundationDB.
The class Main, method main.
public static void main(String[] args) {
    // Get a database connection.
    FDBDatabase fdb = FDBDatabaseFactory.instance().getDatabase();

    // Use the key space API to define a subspace within the cluster for this
    // record store. The key space API in general allows the user to specify
    // a hierarchical structure of named sub-paths. Each record store can then
    // fill in the named entries within the path with values relevant to that
    // store. If the key space includes a directory layer directory, then the
    // value supplied by the user will be replaced by a short prefix supplied
    // by the directory layer. The results from the directory layer are cached
    // locally by the Record Layer to avoid excessive database reads.
    //
    // In this case, the key space implies that there are multiple "applications"
    // that might be defined to run on the same FoundationDB cluster, and that
    // each "application" might have multiple "environments". This could be used,
    // for example, to connect to either the "prod" or "qa" environment for the same
    // application from within the same code base.
    final KeySpace keySpace = new KeySpace(
            new DirectoryLayerDirectory("application")
                    .addSubdirectory(new KeySpaceDirectory("environment", KeySpaceDirectory.KeyType.STRING)));
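    // To make the directory layer substitution concrete: a record in the "demo"
    // environment ends up under keys shaped roughly like
    //   (<short prefix for "record-layer-sample">, "demo", ...)
    // where the directory layer has replaced the long application string with
    // a small integer prefix. (Illustrative shape, not the exact on-disk layout.)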
// Create a path for the "record-layer-sample" application's demo environment.
// Clear all existing data and then return the subspace associated with the key space path.
final KeySpacePath path = keySpace.path("application", "record-layer-sample").add("environment", "demo");
// Clear out any data that may be in the record store.
LOGGER.info("Clearing the Record Store ...");
fdb.run(context -> {
path.deleteAllData(context);
return null;
});
// Build the metadata. This simple approach only works for primary
// keys and secondary indexes defined in the Protobuf message types.
RecordMetaData rmd = RecordMetaData.build(SampleProto.getDescriptor());
FDBRecordStore.Builder recordStoreBuilder = FDBRecordStore.newBuilder().setMetaDataProvider(rmd).setKeySpacePath(path);
    // Write records for Vendor and Item.
    LOGGER.info("Writing Vendor and Item records ...");
    fdb.run((FDBRecordContext cx) -> {
        FDBRecordStore store = recordStoreBuilder.copyBuilder().setContext(cx).create();
        store.saveRecord(SampleProto.Vendor.newBuilder()
                .setVendorId(9375L)
                .setVendorName("Acme")
                .build());
        store.saveRecord(SampleProto.Vendor.newBuilder()
                .setVendorId(1066L)
                .setVendorName("Buy n Large")
                .build());
        store.saveRecord(SampleProto.Item.newBuilder()
                .setItemId(4836L)
                .setItemName("GPS")
                .setVendorId(9375L)
                .build());
        store.saveRecord(SampleProto.Item.newBuilder()
                .setItemId(9970L)
                .setItemName("Personal Transport")
                .setVendorId(1066L)
                .build());
        store.saveRecord(SampleProto.Item.newBuilder()
                .setItemId(8380L)
                .setItemName("Piles of Garbage")
                .setVendorId(1066L)
                .build());
        return null;
    });

    // Use the primary key declared in the Vendor message type to read a record.
    LOGGER.info("Reading Vendor record with primary key 9375L ...");
    SampleProto.Vendor.Builder readBuilder = fdb.run((FDBRecordContext cx) -> {
        FDBRecordStore store = recordStoreBuilder.copyBuilder().setContext(cx).open();
        return SampleProto.Vendor.newBuilder()
                .mergeFrom(store.loadRecord(Key.Evaluated.scalar(9375L).toTuple()).getRecord());
    });
    LOGGER.info(" Result -> Id: {}, Name: {}", readBuilder.getVendorId(), readBuilder.getVendorName());
    // Using the secondary index declared in the message type, query
    // Item by vendor ID, then look up the item ID.
    LOGGER.info("Looking for item IDs with vendor ID 9375L ...");
    ArrayList<Long> ids = fdb.run((FDBRecordContext cx) -> {
        ArrayList<Long> itemIDs = new ArrayList<>();
        FDBRecordStore store = recordStoreBuilder.copyBuilder().setContext(cx).open();
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Item")
                .setFilter(Query.field("vendor_id").equalsValue(9375L))
                .build();
        try (RecordCursor<FDBQueriedRecord<Message>> cursor = store.executeQuery(query)) {
            RecordCursorResult<FDBQueriedRecord<Message>> result;
            do {
                result = cursor.getNext();
                if (result.hasNext()) {
                    itemIDs.add(SampleProto.Item.newBuilder()
                            .mergeFrom(result.get().getRecord())
                            .getItemId());
                }
            } while (result.hasNext());
        }
        return itemIDs;
    });
    ids.forEach((Long res) -> LOGGER.info(" Result -> Vendor ID: 9375, Item ID: {}", res));
    // A kind of hand-crafted "cross-table join" (in some sense). This returns a map
    // linking the name of each vendor to the names of the products they sell.
    // Note that this query is entirely non-blocking until the end.
    // In SQL, this might look something like:
    //
    //   SELECT Vendor.vendor_name, Item.item_name FROM Item JOIN Vendor ON Vendor.vendor_id = Item.vendor_id
    //
    // One difference is that the above SQL query will flatten the results out so that there
    // is exactly one returned row per item name (per vendor), whereas the map returned by
    // this Record Layer query will feature exactly one entry per vendor where the key is the
    // vendor name and the value is the list of that vendor's item names.
    //
    // Note that this query is not particularly efficient as is. To make this efficient, one
    // might consider an index on vendor name. This could scan the index to get the vendor
    // name of the Vendor record type and a second index on item by vendor ID, perhaps with
    // the item name in the value portion of the index definition. This would allow the
    // query to be satisfied with one scan of the vendor name index and another scan of the
    // item's vendor ID index (one scan per vendor).
    LOGGER.info("Grouping items by vendor ...");
    Map<String, List<String>> namesToItems = fdb.run((FDBRecordContext cx) ->
            cx.asyncToSync(FDBStoreTimer.Waits.WAIT_EXECUTE_QUERY,
                    recordStoreBuilder.copyBuilder().setContext(cx).openAsync().thenCompose(store -> {
                        // The outer plan gets all of the vendors.
                        RecordQueryPlan outerPlan = store.planQuery(RecordQuery.newBuilder()
                                .setRecordType("Vendor")
                                .setRequiredResults(Arrays.asList(field("vendor_id"), field("vendor_name")))
                                .build());
                        // The inner plan gets all items for the given vendor ID. Using
                        // "equalsParameter" plans the query once and re-uses the plan for each vendor ID.
                        RecordQueryPlan innerPlan = store.planQuery(RecordQuery.newBuilder()
                                .setRecordType("Item")
                                .setRequiredResults(Collections.singletonList(field("item_name")))
                                .setFilter(Query.field("vendor_id").equalsParameter("vid"))
                                .build());
                        return store.executeQuery(outerPlan)
                                .mapPipelined(record -> {
                                    SampleProto.Vendor vendor = SampleProto.Vendor.newBuilder()
                                            .mergeFrom(record.getRecord())
                                            .build();
                                    return innerPlan.execute(store, EvaluationContext.forBinding("vid", vendor.getVendorId()))
                                            .map(innerRecord -> SampleProto.Item.newBuilder()
                                                    .mergeFrom(innerRecord.getRecord())
                                                    .getItemName())
                                            .asList()
                                            .thenApply(list -> Pair.of(vendor.getVendorName(), list));
                                }, 10)
                                .asList()
                                .thenApply((List<Pair<String, List<String>>> list) ->
                                        list.stream().collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
                    })));
    namesToItems.forEach((String name, List<String> items) ->
            LOGGER.info(" Result -> Vendor Name: {}, Item names: {}", name, items));
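    // With the data written above, the resulting map is, roughly,
    //   { "Acme" -> ["GPS"], "Buy n Large" -> ["Personal Transport", "Piles of Garbage"] }
    // (the item order within each list depends on the plan chosen).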
    // Richer indexes:
    // To build richer primary keys or secondary indexes (than those definable in the protobuf
    // message types), you need to use the more verbose and powerful RecordMetaDataBuilder.
    RecordMetaDataBuilder rmdBuilder = RecordMetaData.newBuilder().setRecords(SampleProto.getDescriptor());

    // Order customers by last name, then first name, then their ID if otherwise equal.
    // NOTE: This operation is dangerous if you have existing data! Existing records are *not*
    // automatically migrated.
    rmdBuilder.getRecordType("Customer")
            .setPrimaryKey(concatenateFields("last_name", "first_name", "customer_id"));

    // Add a global count index. Most record stores should probably add this index as it allows
    // the database to make intelligent decisions based on the current size of the record store.
    rmdBuilder.addUniversalIndex(new Index("globalCount",
            new GroupingKeyExpression(EmptyKeyExpression.EMPTY, 0), IndexTypes.COUNT));

    // Add a FanType.FanOut secondary index for email_address, so that
    // each value of email_address generates its own key in the index.
    rmdBuilder.addIndex("Customer",
            new Index("email_address", field("email_address", FanType.FanOut), IndexTypes.VALUE));

    // Add a FanType.Concatenate secondary index for preference_tag, so
    // that all values of preference_tag generate a single key in the index.
    rmdBuilder.addIndex("Customer",
            new Index("preference_tag", field("preference_tag", FanType.Concatenate), IndexTypes.VALUE));

    // Add an index on the count of each preference tag. This allows us to
    // quickly get the number of customers for each preference tag. The key
    // provided will create a separate "count" field for each value of the
    // preference_tag field and keep track of the number of customer
    // records with each value.
    rmdBuilder.addIndex("Customer", new Index("preference_tag_count",
            new GroupingKeyExpression(field("preference_tag", FanType.FanOut), 0), IndexTypes.COUNT));

    // Add a nested secondary index on order, so that the quantity of
    // each order generates its own key in the index.
    rmdBuilder.addIndex("Customer",
            new Index("order", field("order", FanType.FanOut).nest("quantity"), IndexTypes.VALUE));

    // Add an index on the sum of the quantity of each item in each
    // order. This can be used to know how many of each item have been ordered across
    // all customers. The grouping key here is a little hairy, but it
    // specifies that the "item_id" column should be used as a grouping key
    // and the quantity as the summed value, so it will keep track of the
    // quantity ordered of each item.
    rmdBuilder.addIndex("Customer", new Index("item_quantity_sum",
            new GroupingKeyExpression(field("order", FanType.FanOut)
                    .nest(concatenateFields("item_id", "quantity")), 1),
            IndexTypes.SUM));
    // Rebuild the metadata for the newly added indexes before reading or
    // writing more data.
    RecordMetaData rmd2 = rmdBuilder.getRecordMetaData();
    recordStoreBuilder.setMetaDataProvider(rmd2);

    // Calling "open" on an existing record store with new meta-data will
    // create the new indexes and place them in a "disabled" mode, meaning that
    // they cannot yet be used for queries. (In particular, the query planner
    // will ignore these indexes, and any attempt to read from them will
    // throw an error.) To enable querying, one must invoke the online index
    // builder. This will scan through the record store across multiple
    // transactions and populate the new indexes with data from the existing
    // entries. During the build job, the record store remains available for
    // reading and writing, but there may be additional conflicts if the index
    // build job and normal operations happen to mutate the same records.
    RecordStoreState storeState = fdb.run(cx -> {
        FDBRecordStore store = recordStoreBuilder.copyBuilder().setContext(cx).open();
        return store.getRecordStoreState();
    });
    LOGGER.info("Running index builds of new indexes:");
    // Build all of the indexes in parallel by firing off a future for each and
    // then waiting for all of them.
    fdb.asyncToSync(null, FDBStoreTimer.Waits.WAIT_ONLINE_BUILD_INDEX,
            AsyncUtil.whenAll(storeState.getDisabledIndexNames().stream()
                    .map(indexName -> {
                        // Build this index. It will begin the background job and return a future
                        // that will complete when the index is ready for querying.
                        OnlineIndexer indexBuilder = OnlineIndexer.newBuilder()
                                .setDatabase(fdb)
                                .setRecordStoreBuilder(recordStoreBuilder)
                                .setIndex(indexName)
                                .build();
                        return indexBuilder.buildIndexAsync()
                                .thenRun(() -> LOGGER.info(" Index build of {} is complete.", indexName))
                                .whenComplete((vignore, eignore) -> indexBuilder.close());
                    })
                    .collect(Collectors.toList())));
    // Write larger records for Customer (and Order).
    LOGGER.info("Adding records with new secondary indexes ...");
    fdb.run((FDBRecordContext cx) -> {
        FDBRecordStore store = recordStoreBuilder.copyBuilder().setContext(cx).open();
        store.saveRecord(SampleProto.Customer.newBuilder()
                .setCustomerId(9264L)
                .setFirstName("John")
                .setLastName("Smith")
                .addEmailAddress("jsmith@example.com")
                .addEmailAddress("john_smith@example.com")
                .addPreferenceTag("books")
                .addPreferenceTag("movies")
                .addOrder(SampleProto.Order.newBuilder()
                        .setOrderId(3875L)
                        .setItemId(9374L)
                        .setQuantity(2))
                .addOrder(SampleProto.Order.newBuilder()
                        .setOrderId(4828L)
                        .setItemId(2740L)
                        .setQuantity(1))
                .setPhoneNumber("(703) 555-8255")
                .build());
        store.saveRecord(SampleProto.Customer.newBuilder()
                .setCustomerId(8365L)
                .setFirstName("Jane")
                .setLastName("Doe")
                .addEmailAddress("jdoe@example.com")
                .addEmailAddress("jane_doe@example.com")
                .addPreferenceTag("games")
                .addPreferenceTag("lawn")
                .addPreferenceTag("books")
                .addOrder(SampleProto.Order.newBuilder()
                        .setOrderId(9280L)
                        .setItemId(2740L)
                        .setQuantity(3))
                .setPhoneNumber("(408) 555-0248")
                .build());
        return null;
    });

    // Get the record count. This uses the global count index to get the
    // full number of records in the store.
    Long recordCount = fdb.run((FDBRecordContext cx) ->
            cx.asyncToSync(FDBStoreTimer.Waits.WAIT_EXECUTE_QUERY,
                    recordStoreBuilder.copyBuilder().setContext(cx).openAsync()
                            .thenCompose(FDBRecordStore::getSnapshotRecordCount)));
    LOGGER.info("Store contains {} records.", recordCount);
    // Query all records with the first name "Jane".
    // Performs a full scan of the primary key index.
    LOGGER.info("Retrieving all customers with first name \"Jane\"...");
    List<String> names = fdb.run((FDBRecordContext cx) -> {
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Customer")
                .setFilter(Query.field("first_name").equalsValue("Jane"))
                .build();
        return readNames(recordStoreBuilder, cx, query);
    });
    names.forEach((String res) -> LOGGER.info(" Result -> {}", res));

    // Query all records with last name "Doe".
    // Scans only the customers from the primary key index.
    LOGGER.info("Retrieving all customers with last name \"Doe\"...");
    names = fdb.run((FDBRecordContext cx) -> {
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Customer")
                .setFilter(Query.field("last_name").equalsValue("Doe"))
                .build();
        return readNames(recordStoreBuilder, cx, query);
    });
    names.forEach((String res) -> LOGGER.info(" Result -> {}", res));

    // Query all records with first_name "Jane" and last_name "Doe".
    // Scans only the customers from the primary key index.
    LOGGER.info("Retrieving all customers with name \"Jane Doe\"...");
    names = fdb.run((FDBRecordContext cx) -> {
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Customer")
                .setFilter(Query.and(
                        Query.field("first_name").equalsValue("Jane"),
                        Query.field("last_name").equalsValue("Doe")))
                .build();
        return readNames(recordStoreBuilder, cx, query);
    });
    names.forEach((String res) -> LOGGER.info(" Result -> {}", res));
    // Query all records with an email address beginning with "john".
    // Uses FanType.FanOut secondary index.
    LOGGER.info("Retrieving all customers with an email address beginning with \"john\"...");
    Map<String, List<String>> addresses = fdb.run((FDBRecordContext cx) -> {
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Customer")
                .setFilter(Query.field("email_address").oneOfThem().startsWith("john"))
                .build();
        return cx.asyncToSync(FDBStoreTimer.Waits.WAIT_EXECUTE_QUERY,
                recordStoreBuilder.copyBuilder().setContext(cx).openAsync().thenCompose(store -> {
                    Map<String, List<String>> addressMap = new HashMap<>();
                    return store.executeQuery(query)
                            .forEach((FDBQueriedRecord<Message> record) -> {
                                SampleProto.Customer.Builder builder = SampleProto.Customer.newBuilder()
                                        .mergeFrom(record.getRecord());
                                addressMap.put(builder.getFirstName() + " " + builder.getLastName(),
                                        builder.getEmailAddressList());
                            })
                            .thenApply(v -> addressMap);
                }));
    });
    addresses.forEach((String k, List<String> vals) ->
            LOGGER.info(" Result -> {} with emails {}", k, vals));

    // Query all records with preference_tags "books" and "movies".
    // Uses FanType.Concatenate secondary index.
    LOGGER.info("Retrieving all customers with preference tags \"books\" and \"movies\"...");
    names = fdb.run((FDBRecordContext cx) -> {
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Customer")
                .setFilter(Query.and(
                        Query.field("preference_tag").oneOfThem().equalsValue("books"),
                        Query.field("preference_tag").oneOfThem().equalsValue("movies")))
                .build();
        return readNames(recordStoreBuilder, cx, query);
    });
    names.forEach((String res) -> LOGGER.info(" Result -> {}", res));

    // Get the number of customers who have "books" listed as one of their preference tags.
    Long bookPreferenceCount = fdb.run((FDBRecordContext cx) ->
            cx.asyncToSync(FDBStoreTimer.Waits.WAIT_EXECUTE_QUERY,
                    recordStoreBuilder.copyBuilder().setContext(cx).openAsync().thenCompose(store -> {
                        Index index = store.getRecordMetaData().getIndex("preference_tag_count");
                        IndexAggregateFunction function = new IndexAggregateFunction(
                                FunctionNames.COUNT, index.getRootExpression(), index.getName());
                        return store.evaluateAggregateFunction(
                                        Collections.singletonList("Customer"), function,
                                        Key.Evaluated.scalar("books"), IsolationLevel.SERIALIZABLE)
                                .thenApply(tuple -> tuple.getLong(0));
                    })));
    LOGGER.info("Number of customers with the \"books\" preference tag: {}", bookPreferenceCount);
    // Query all customers with an order of quantity greater than 2.
    // Uses nested secondary index.
    LOGGER.info("Retrieving all customers with an order of quantity greater than 2 ...");
    names = fdb.run((FDBRecordContext cx) -> {
        RecordQuery query = RecordQuery.newBuilder()
                .setRecordType("Customer")
                .setFilter(Query.field("order").oneOfThem().matches(Query.field("quantity").greaterThan(2)))
                .build();
        return readNames(recordStoreBuilder, cx, query);
    });
    names.forEach((String res) -> LOGGER.info(" Result -> {}", res));

    // Get the sum of the quantity of items ordered for item ID 2740.
    // Using the index, it can determine this by reading a single
    // key in the database.
    Long itemQuantitySum = fdb.run((FDBRecordContext cx) ->
            cx.asyncToSync(FDBStoreTimer.Waits.WAIT_EXECUTE_QUERY,
                    recordStoreBuilder.copyBuilder().setContext(cx).openAsync().thenCompose(store -> {
                        Index index = store.getRecordMetaData().getIndex("item_quantity_sum");
                        IndexAggregateFunction function = new IndexAggregateFunction(
                                FunctionNames.SUM, index.getRootExpression(), index.getName());
                        return store.evaluateAggregateFunction(
                                        Collections.singletonList("Customer"), function,
                                        Key.Evaluated.scalar(2740L), IsolationLevel.SERIALIZABLE)
                                .thenApply(tuple -> tuple.getLong(0));
                    })));
    LOGGER.info("Total quantity ordered of item 2740L: {}", itemQuantitySum);

    // Get the sum of the quantity of all items ordered.
    // Using the index, it will do a single range scan that hits one key
    // for each unique item ID.
    Long allItemsQuantitySum = fdb.run((FDBRecordContext cx) ->
            cx.asyncToSync(FDBStoreTimer.Waits.WAIT_EXECUTE_QUERY,
                    recordStoreBuilder.copyBuilder().setContext(cx).openAsync().thenCompose(store -> {
                        Index index = store.getRecordMetaData().getIndex("item_quantity_sum");
                        IndexAggregateFunction function = new IndexAggregateFunction(
                                FunctionNames.SUM, index.getRootExpression(), index.getName());
                        return store.evaluateAggregateFunction(
                                        Collections.singletonList("Customer"), function,
                                        TupleRange.ALL, IsolationLevel.SERIALIZABLE)
                                .thenApply(tuple -> tuple.getLong(0));
                    })));
    LOGGER.info("Total quantity ordered of all items: {}", allItemsQuantitySum);
}
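The readNames helper called in the queries above is not part of this excerpt. A minimal sketch of what it might look like, reconstructed from its call sites (the body is an assumption, not the project's exact code):

// Hedged reconstruction of the helper used above: run the query and collect
// "first last" customer names with the same getNext() loop shown earlier.
private static List<String> readNames(FDBRecordStore.Builder recordStoreBuilder,
                                      FDBRecordContext cx, RecordQuery query) {
    List<String> names = new ArrayList<>();
    FDBRecordStore store = recordStoreBuilder.copyBuilder().setContext(cx).open();
    try (RecordCursor<FDBQueriedRecord<Message>> cursor = store.executeQuery(query)) {
        RecordCursorResult<FDBQueriedRecord<Message>> result;
        do {
            result = cursor.getNext();
            if (result.hasNext()) {
                SampleProto.Customer.Builder customer = SampleProto.Customer.newBuilder()
                        .mergeFrom(result.get().getRecord());
                names.add(customer.getFirstName() + " " + customer.getLastName());
            }
        } while (result.hasNext());
    }
    return names;
}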
Use of com.apple.foundationdb.record.RecordCursor in project fdb-record-layer by FoundationDB.
The class FDBRecordStore, method addRebuildRecordCountsJob.
public void addRebuildRecordCountsJob(List<CompletableFuture<Void>> work) {
    final KeyExpression recordCountKey = getRecordMetaData().getRecordCountKey();
    if (recordCountKey == null) {
        return;
    }
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(KeyValueLogMessage.of("recounting all records",
                subspaceProvider.logKey(), subspaceProvider.toString(context)));
    }
    final Map<Key.Evaluated, Long> counts = new HashMap<>();
    final RecordCursor<FDBStoredRecord<Message>> records = scanRecords(null, ScanProperties.FORWARD_SCAN);
    CompletableFuture<Void> future = records.forEach(rec -> {
        Key.Evaluated subkey = recordCountKey.evaluateSingleton(rec);
        counts.compute(subkey, (k, v) -> (v == null) ? 1 : v + 1);
    }).thenApply(vignore -> {
        final Transaction tr = ensureContextActive();
        final byte[] bytes = new byte[8];
        final ByteBuffer buf = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN);
        for (Map.Entry<Key.Evaluated, Long> entry : counts.entrySet()) {
            buf.putLong(entry.getValue());
            tr.set(getSubspace().pack(Tuple.from(RECORD_COUNT_KEY).addAll(entry.getKey().toTupleAppropriateList())),
                    bytes);
            buf.clear();
        }
        return null;
    });
    future = context.instrument(FDBStoreTimer.Events.RECOUNT_RECORDS, future);
    work.add(future);
}
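Note that the method only enqueues its future onto the caller's work list; nothing completes here. A minimal usage sketch, assuming an open store and context (variable names illustrative):

// Illustrative usage: enqueue the recount alongside other maintenance work,
// then wait for everything before committing the transaction.
List<CompletableFuture<Void>> work = new ArrayList<>();
recordStore.addRebuildRecordCountsJob(work);
AsyncUtil.whenAll(work).join(); // real code would prefer context.asyncToSync(...)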
Use of com.apple.foundationdb.record.RecordCursor in project fdb-record-layer by FoundationDB.
The class FDBRecordStore, method getPrimaryKeyBoundaries.
/**
 * Return a cursor of boundaries separating the key ranges maintained by each FDB server. This information can be
 * useful for splitting a large task (e.g., rebuilding an index for a large record store) into smaller tasks (e.g.,
 * rebuilding the index for records in certain primary key ranges) more evenly so that they can be executed
 * efficiently in parallel. The returned boundaries are an estimate from FDB's locality API and may not
 * represent the exact boundary locations at any database version.
 * <p>
 * The boundaries are returned as a cursor which is sorted and does not contain any duplicates. The first element
 * of the cursor is greater than or equal to <code>low</code>, and the last element is less than or equal to
 * <code>high</code>.
 * <p>
 * This implementation may not work when there are too many shard boundaries to complete in a single transaction.
 * <p>
 * Note: the returned cursor is blocking and must not be used in an asynchronous context.
 *
 * @param low low endpoint of primary key range (inclusive)
 * @param high high endpoint of primary key range (exclusive)
 * @return a cursor of boundary primary keys
 */
@API(API.Status.EXPERIMENTAL)
@Nonnull
public RecordCursor<Tuple> getPrimaryKeyBoundaries(@Nonnull Tuple low, @Nonnull Tuple high) {
    final Transaction transaction = ensureContextActive();
    byte[] rangeStart = recordsSubspace().pack(low);
    byte[] rangeEnd = recordsSubspace().pack(high);
    CloseableAsyncIterator<byte[]> cursor = context.getDatabase().getLocalityProvider()
            .getBoundaryKeys(transaction, rangeStart, rangeEnd);
    final boolean hasSplitRecordSuffix = hasSplitRecordSuffix();
    DistinctFilterCursorClosure closure = new DistinctFilterCursorClosure();
    return RecordCursor.flatMapPipelined(
                    ignore -> RecordCursor.fromIterator(getExecutor(), cursor),
                    (result, ignore) -> RecordCursor.fromIterator(getExecutor(),
                            transaction.snapshot().getRange(result, rangeEnd, 1).iterator()),
                    null, DEFAULT_PIPELINE_SIZE)
            .map(keyValue -> {
                Tuple recordKey = recordsSubspace().unpack(keyValue.getKey());
                return hasSplitRecordSuffix ? recordKey.popBack() : recordKey;
            })
            .filter(closure::pred);
}
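Because the returned cursor is blocking, a caller drains it with the synchronous getNext() loop rather than the asynchronous combinators. A sketch, assuming an open store and low/high key tuples:

// Illustrative usage: collect boundary keys so a large index rebuild can be
// split into one sub-task per key range.
List<Tuple> boundaries = new ArrayList<>();
try (RecordCursor<Tuple> cursor = store.getPrimaryKeyBoundaries(lowKey, highKey)) {
    RecordCursorResult<Tuple> result;
    while ((result = cursor.getNext()).hasNext()) {
        boundaries.add(result.get());
    }
}
// Adjacent boundaries (with lowKey and highKey at the ends) bound the sub-tasks.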
Use of com.apple.foundationdb.record.RecordCursor in project fdb-record-layer by FoundationDB.
The class TimeWindowLeaderboardIndexMaintainer, method scan.
@Nonnull
@Override
public RecordCursor<IndexEntry> scan(@Nonnull IndexScanBounds scanBounds,
                                     @Nullable byte[] continuation,
                                     @Nonnull ScanProperties scanProperties) {
    final IndexScanType scanType = scanBounds.getScanType();
    if (scanType != IndexScanType.BY_VALUE && scanType != IndexScanType.BY_RANK
            && scanType != IndexScanType.BY_TIME_WINDOW) {
        throw new RecordCoreException("Can only scan leaderboard index by time window, rank or value.");
    }
    // Decode range arguments.
    final int type;
    final long timestamp;
    final TupleRange leaderboardRange;
    if (scanType == IndexScanType.BY_TIME_WINDOW) {
        // Get oldest leaderboard of type containing timestamp.
        if (scanBounds instanceof TimeWindowScanRange) {
            TimeWindowScanRange scanRange = (TimeWindowScanRange) scanBounds;
            type = scanRange.getLeaderboardType();
            timestamp = scanRange.getLeaderboardTimestamp();
            leaderboardRange = scanRange.getScanRange();
        } else {
            // TODO: For compatibility, accept scan with BY_TIME_WINDOW and TupleRange for a while.
            // This code can be removed when we are confident all callers have been converted.
            IndexScanRange scanRange = (IndexScanRange) scanBounds;
            TupleRange rankRange = scanRange.getScanRange();
            final Tuple lowRank = rankRange.getLow();
            final Tuple highRank = rankRange.getHigh();
            type = (int) lowRank.getLong(0);
            timestamp = lowRank.getLong(1);
            leaderboardRange = new TupleRange(
                    Tuple.fromList(lowRank.getItems().subList(2, lowRank.size())),
                    Tuple.fromList(highRank.getItems().subList(2, highRank.size())),
                    rankRange.getLowEndpoint(),
                    rankRange.getHighEndpoint());
        }
    } else {
        // Get the all-time leaderboard for unqualified rank or value.
        IndexScanRange scanRange = (IndexScanRange) scanBounds;
        type = TimeWindowLeaderboard.ALL_TIME_LEADERBOARD_TYPE;
        // Any value would do.
        timestamp = 0;
        leaderboardRange = scanRange.getScanRange();
    }
    final int groupPrefixSize = getGroupingCount();
    final CompletableFuture<TimeWindowLeaderboard> leaderboardFuture = oldestLeaderboardMatching(type, timestamp);
    final CompletableFuture<TupleRange> scoreRangeFuture;
    if (scanType == IndexScanType.BY_VALUE) {
        scoreRangeFuture = leaderboardFuture.thenApply(leaderboard ->
                leaderboard == null ? null : leaderboardRange);
    } else {
        scoreRangeFuture = leaderboardFuture.thenCompose(leaderboard -> {
            if (leaderboard == null) {
                return CompletableFuture.completedFuture(null);
            }
            final Subspace extraSubspace = getSecondarySubspace();
            final Subspace leaderboardSubspace = extraSubspace.subspace(leaderboard.getSubspaceKey());
            final RankedSet.Config leaderboardConfig = config.toBuilder()
                    .setNLevels(leaderboard.getNLevels())
                    .build();
            return RankedSetIndexHelper.rankRangeToScoreRange(
                    state, groupPrefixSize, leaderboardSubspace, leaderboardConfig, leaderboardRange);
        });
    }
    // Add leaderboard's key to the front and take it off of the results.
    return RecordCursor.flatMapPipelined(
            ignore -> RecordCursor.fromFuture(getExecutor(), scoreRangeFuture),
            (scoreRange, ignore) -> {
                if (scoreRange == null) {
                    return RecordCursor.empty(getExecutor());
                }
                // Already waited in scoreRangeFuture.
                final TimeWindowLeaderboard leaderboard = state.context.joinNow(leaderboardFuture);
                final CompletableFuture<Boolean> highStoreFirstFuture;
                if (scanType == IndexScanType.BY_VALUE) {
                    final Tuple lowGroup = scoreRange.getLow() != null && scoreRange.getLow().size() > groupPrefixSize
                            ? TupleHelpers.subTuple(scoreRange.getLow(), 0, groupPrefixSize) : null;
                    final Tuple highGroup = scoreRange.getHigh() != null && scoreRange.getHigh().size() > groupPrefixSize
                            ? TupleHelpers.subTuple(scoreRange.getHigh(), 0, groupPrefixSize) : null;
                    if (lowGroup != null && lowGroup.equals(highGroup)) {
                        highStoreFirstFuture = isHighScoreFirst(leaderboard.getDirectory(), lowGroup);
                    } else {
                        highStoreFirstFuture = CompletableFuture.completedFuture(
                                leaderboard.getDirectory().isHighScoreFirst());
                    }
                } else {
                    highStoreFirstFuture = AsyncUtil.READY_FALSE;
                }
                if (highStoreFirstFuture.isDone()) {
                    return scanLeaderboard(leaderboard, state.context.joinNow(highStoreFirstFuture),
                            scoreRange, continuation, scanProperties);
                } else {
                    return RecordCursor.flatMapPipelined(
                            ignore2 -> RecordCursor.fromFuture(getExecutor(), highStoreFirstFuture),
                            (highScoreFirst, ignore2) -> scanLeaderboard(leaderboard, highScoreFirst,
                                    scoreRange, continuation, scanProperties),
                            null, 1);
                }
            }, null, 1)
            .mapPipelined(kv -> getIndexEntry(kv, groupPrefixSize,
                    state.context.joinNow(leaderboardFuture).getDirectory()), 1);
}
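Whatever the scan type, the result is an ordinary RecordCursor of IndexEntry values, so callers consume it with the usual cursor combinators. A sketch, assuming a maintainer instance and scan bounds are in scope (names illustrative):

// Illustrative usage: drain the scan with the asynchronous forEach.
RecordCursor<IndexEntry> entries = maintainer.scan(scanBounds, null, ScanProperties.FORWARD_SCAN);
entries.forEach(entry -> LOGGER.info("leaderboard entry: {}", entry.getKey())).join();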
Use of com.apple.foundationdb.record.RecordCursor in project fdb-record-layer by FoundationDB.
The class KeySpaceDirectory, method listSubdirectoryAsync.
@Nonnull
// SonarQube doesn't realize that the cursor is wrapped and returned
@SuppressWarnings("squid:S2095")
protected RecordCursor<ResolvedKeySpacePath> listSubdirectoryAsync(@Nullable KeySpacePath listFrom,
                                                                   @Nonnull FDBRecordContext context,
                                                                   @Nonnull String subdirName,
                                                                   @Nullable ValueRange<?> valueRange,
                                                                   @Nullable byte[] continuation,
                                                                   @Nonnull ScanProperties scanProperties) {
    if (listFrom != null && listFrom.getDirectory() != this) {
        throw new RecordCoreException("Provided path does not belong to this directory")
                .addLogInfo("path", listFrom, "directory", this.getName());
    }
    final KeySpaceDirectory subdir = getSubdirectory(subdirName);
    final CompletableFuture<ResolvedKeySpacePath> resolvedFromFuture = listFrom == null
            ? CompletableFuture.completedFuture(null)
            : listFrom.toResolvedPathAsync(context);
    // The chained cursor cannot implement reverse scan, so we implement it by having the
    // inner key-value cursor do the reversing but telling the chained cursor we are moving
    // forward.
    final ScanProperties chainedCursorScanProperties;
    if (scanProperties.isReverse()) {
        chainedCursorScanProperties = scanProperties.setReverse(false);
    } else {
        chainedCursorScanProperties = scanProperties;
    }
    // For the read of the individual row keys, we only want to read a single key. In addition,
    // the ChainedCursor is going to do counting of our reads to apply any limits that were specified
    // on the ScanProperties. We don't want the inner KeyValueCursor in nextTuple() to ALSO count those
    // same reads, so we clear out its limits.
    final ScanProperties keyReadScanProperties = scanProperties.with(
            props -> props.clearState().setReturnedRowLimit(1));
    return new LazyCursor<>(
            resolvedFromFuture.thenCompose(resolvedFrom -> {
                final Subspace subspace = resolvedFrom == null ? new Subspace() : resolvedFrom.toSubspace();
                return subdir.getValueRange(context, valueRange, subspace).thenApply(range -> {
                    final RecordCursor<Tuple> cursor = new ChainedCursor<>(
                            context,
                            lastKey -> nextTuple(context, subspace, range, lastKey, keyReadScanProperties),
                            Tuple::pack,
                            Tuple::fromBytes,
                            continuation,
                            chainedCursorScanProperties);
                    return cursor.mapPipelined(tuple -> {
                        final Tuple key = Tuple.fromList(tuple.getItems());
                        return findChildForKey(context, resolvedFrom, key, 1, 0);
                    }, 1);
                });
            }),
            context.getExecutor());
}
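Application code normally reaches this through the public listing methods on KeySpacePath rather than calling the directory method directly. A sketch against the key space from the first example (the wrapper method name here is an assumption; check the actual KeySpacePath API):

// Illustrative usage: list the resolved "environment" entries under one application.
// (Assumes a blocking KeySpacePath.listSubdirectory wrapper exists.)
List<ResolvedKeySpacePath> environments = fdb.run(context ->
        keySpace.path("application", "record-layer-sample")
                .listSubdirectory(context, "environment"));
environments.forEach(env -> LOGGER.info("environment: {}", env));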