Use of com.apple.foundationdb.record.provider.foundationdb.FDBIndexableRecord in the fdb-record-layer project by FoundationDB.
From the class StandardIndexMaintainer, method checkUniqueness.
protected <M extends Message> void checkUniqueness(@Nonnull FDBIndexableRecord<M> savedRecord, @Nonnull IndexEntry indexEntry) {
    Tuple valueKey = indexEntry.getKey();
    AsyncIterable<KeyValue> kvs = state.transaction.getRange(state.indexSubspace.range(valueKey));
    Tuple primaryKey = savedRecord.getPrimaryKey();
    final CompletableFuture<Void> checker = state.store.getContext().instrument(FDBStoreTimer.Events.CHECK_INDEX_UNIQUENESS,
            AsyncUtil.forEach(kvs, kv -> {
                Tuple existingEntry = unpackKey(getIndexSubspace(), kv);
                Tuple existingKey = state.index.getEntryPrimaryKey(existingEntry);
                if (!TupleHelpers.equals(primaryKey, existingKey)) {
                    if (state.store.isIndexWriteOnly(state.index)) {
                        addUniquenessViolation(valueKey, primaryKey, existingKey);
                        addUniquenessViolation(valueKey, existingKey, primaryKey);
                    } else {
                        throw new RecordIndexUniquenessViolation(state.index, indexEntry, primaryKey, existingKey);
                    }
                }
            }, getExecutor()));
    // Add a pre-commit check to prevent accidentally committing and getting into an invalid state.
    state.store.addIndexUniquenessCommitCheck(state.index, checker);
}
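The uniqueness scan works because of how a value index lays out its keys: each entry is the indexed value columns followed by the record's primary key columns, so indexSubspace.range(valueKey) covers exactly the entries that share the indexed value, and the conflicting primary key can be read back from the tail of each scanned key (what state.index.getEntryPrimaryKey does above). The sketch below is a hypothetical illustration of that layout using only the FoundationDB Tuple and Subspace helpers; the subspace prefix, indexed value, and primary keys are invented, and no cluster is needed to run it.

import java.util.List;

import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;

public class UniquenessKeyLayoutSketch {
    public static void main(String[] args) {
        // Hypothetical index subspace; in a real store the prefix is assigned by the record store.
        Subspace indexSubspace = new Subspace(Tuple.from("my-store", "idx", "email_unique"));

        // Two records that index the same value but have different primary keys.
        Tuple entryForRecord1 = Tuple.from("alice@example.com", 1066L);
        Tuple entryForRecord2 = Tuple.from("alice@example.com", 1415L);

        // Both entries live under the prefix for the indexed value, which is what
        // state.indexSubspace.range(valueKey) scans in checkUniqueness.
        Tuple valueKey = Tuple.from("alice@example.com");
        Subspace valueSubspace = indexSubspace.subspace(valueKey);
        System.out.println(valueSubspace.contains(indexSubspace.pack(entryForRecord1))); // true
        System.out.println(valueSubspace.contains(indexSubspace.pack(entryForRecord2))); // true

        // Unpacking a scanned key and dropping the leading value column recovers the other
        // record's primary key, which is then compared against the record being saved.
        List<Object> items = indexSubspace.unpack(indexSubspace.pack(entryForRecord2)).getItems();
        Tuple existingPrimaryKey = Tuple.fromList(items.subList(1, items.size()));
        System.out.println(existingPrimaryKey); // the conflicting record's primary key
    }
}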
Use of com.apple.foundationdb.record.provider.foundationdb.FDBIndexableRecord in the fdb-record-layer project by FoundationDB.
From the class RankIndexMaintainer, method updateIndexKeys.
@Override
protected <M extends Message> CompletableFuture<Void> updateIndexKeys(@Nonnull final FDBIndexableRecord<M> savedRecord,
                                                                      final boolean remove,
                                                                      @Nonnull final List<IndexEntry> indexEntries) {
    final int groupPrefixSize = getGroupingCount();
    final Subspace extraSubspace = getSecondarySubspace();
    final List<CompletableFuture<Void>> ordinaryIndexFutures = new ArrayList<>(indexEntries.size());
    final Map<Subspace, CompletableFuture<Void>> rankFutures = Maps.newHashMapWithExpectedSize(indexEntries.size());
    for (IndexEntry indexEntry : indexEntries) {
        // Maintain an ordinary B-tree index by score.
        CompletableFuture<Void> updateOrdinaryIndex = updateOneKeyAsync(savedRecord, remove, indexEntry);
        if (!MoreAsyncUtil.isCompletedNormally(updateOrdinaryIndex)) {
            ordinaryIndexFutures.add(updateOrdinaryIndex);
        }
        final Subspace rankSubspace;
        final Tuple scoreKey;
        if (groupPrefixSize > 0) {
            final List<Object> keyValues = indexEntry.getKey().getItems();
            rankSubspace = extraSubspace.subspace(Tuple.fromList(keyValues.subList(0, groupPrefixSize)));
            scoreKey = Tuple.fromList(keyValues.subList(groupPrefixSize, keyValues.size()));
        } else {
            rankSubspace = extraSubspace;
            scoreKey = indexEntry.getKey();
        }
        // It is unsafe to have two concurrent updates to the same ranked set, so ensure that at most
        // one update per grouping key is ongoing at any given time
        final Function<Void, CompletableFuture<Void>> futureSupplier =
                vignore -> RankedSetIndexHelper.updateRankedSet(state, rankSubspace, config, indexEntry.getKey(), scoreKey, remove);
        CompletableFuture<Void> existingFuture = rankFutures.get(rankSubspace);
        if (existingFuture == null) {
            rankFutures.put(rankSubspace, futureSupplier.apply(null));
        } else {
            rankFutures.put(rankSubspace, existingFuture.thenCompose(futureSupplier));
        }
    }
    return CompletableFuture.allOf(AsyncUtil.whenAll(ordinaryIndexFutures), AsyncUtil.whenAll(rankFutures.values()));
}
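The rankFutures map above is a general pattern for serializing asynchronous work per key: the first update for a grouping key starts immediately, and each later update for the same key is chained onto the previous one with thenCompose, while different keys proceed in parallel. Here is a minimal, self-contained sketch of just that pattern in plain java.util.concurrent terms; the simulateRankedSetUpdate method and the String keys are stand-ins (not the Record Layer API) for RankedSetIndexHelper.updateRankedSet and the per-group rank Subspace.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

public class PerKeyChainingSketch {
    // Stand-in for RankedSetIndexHelper.updateRankedSet: some asynchronous update for one grouping key.
    private static CompletableFuture<Void> simulateRankedSetUpdate(String groupingKey) {
        return CompletableFuture.runAsync(() -> System.out.println("updating ranked set for " + groupingKey));
    }

    // Updates sharing a grouping key are chained so they never run concurrently;
    // updates for different keys remain concurrent, mirroring the rankFutures loop above.
    public static CompletableFuture<Void> updateAll(List<String> groupingKeys) {
        final Map<String, CompletableFuture<Void>> chains = new HashMap<>();
        for (String key : groupingKeys) {
            final Function<Void, CompletableFuture<Void>> update = vignore -> simulateRankedSetUpdate(key);
            final CompletableFuture<Void> existing = chains.get(key);
            if (existing == null) {
                chains.put(key, update.apply(null));           // first update for this key starts now
            } else {
                chains.put(key, existing.thenCompose(update)); // later updates wait for the previous one
            }
        }
        return CompletableFuture.allOf(chains.values().toArray(new CompletableFuture<?>[0]));
    }

    public static void main(String[] args) {
        updateAll(List.of("group-1", "group-1", "group-2")).join();
    }
}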
Use of com.apple.foundationdb.record.provider.foundationdb.FDBIndexableRecord in the fdb-record-layer project by FoundationDB.
From the class TimeWindowLeaderboardIndexMaintainer, method updateIndexKeys.
@Override
protected <M extends Message> CompletableFuture<Void> updateIndexKeys(@Nonnull final FDBIndexableRecord<M> savedRecord,
                                                                      final boolean remove,
                                                                      @Nonnull final List<IndexEntry> indexEntries) {
    final Subspace extraSubspace = getSecondarySubspace();
    // The value for the index key cannot vary from entry to entry, so get the value only from the first entry.
    final Tuple entryValue = indexEntries.isEmpty() ? TupleHelpers.EMPTY : indexEntries.get(0).getValue();
    return loadDirectory().thenCompose(directory -> {
        if (directory == null) {
            return AsyncUtil.DONE;
        }
        return groupOrderedScoreIndexKeys(indexEntries, directory, true).thenCompose(groupedScores -> {
            final List<CompletableFuture<Void>> futures = new ArrayList<>();
            for (Iterable<TimeWindowLeaderboard> directoryEntry : directory.getLeaderboards().values()) {
                for (TimeWindowLeaderboard leaderboard : directoryEntry) {
                    for (Map.Entry<Tuple, Collection<OrderedScoreIndexKey>> groupEntry : groupedScores.entrySet()) {
                        final Optional<OrderedScoreIndexKey> bestContainedScore = groupEntry.getValue().stream()
                                .filter(score -> leaderboard.containsTimestamp(score.timestamp))
                                .findFirst();
                        if (bestContainedScore.isPresent()) {
                            final Tuple groupKey = groupEntry.getKey();
                            final OrderedScoreIndexKey indexKey = bestContainedScore.get();
                            final Tuple leaderboardGroupKey = leaderboard.getSubspaceKey().addAll(groupKey);
                            // Update the ordinary B-tree for this leaderboard.
                            final Tuple entryKey = leaderboardGroupKey.addAll(indexKey.scoreKey);
                            CompletableFuture<Void> updateOrdinaryIndex =
                                    updateOneKeyAsync(savedRecord, remove, new IndexEntry(state.index, entryKey, entryValue));
                            if (!MoreAsyncUtil.isCompletedNormally(updateOrdinaryIndex)) {
                                futures.add(updateOrdinaryIndex);
                            }
                            // Update the corresponding ranked set for this leaderboard.
                            // Notice that since each leaderboard has its own subspace key and at most one score
                            // per record is chosen per leaderboard, this is the only time this record will be
                            // indexed in this rankSubspace. Compare/contrast: RankIndexMaintainer::updateIndexKeys.
                            final Subspace rankSubspace = extraSubspace.subspace(leaderboardGroupKey);
                            final RankedSet.Config leaderboardConfig = config.toBuilder().setNLevels(leaderboard.getNLevels()).build();
                            futures.add(RankedSetIndexHelper.updateRankedSet(state, rankSubspace, leaderboardConfig, entryKey, indexKey.scoreKey, remove));
                        }
                    }
                }
            }
            Optional<Long> latestTimestamp = groupedScores.values().stream()
                    .flatMap(Collection::stream)
                    .map(OrderedScoreIndexKey::getTimestamp)
                    .max(Long::compareTo);
            if (latestTimestamp.isPresent()) {
                // Keep track of the latest timestamp for any indexed entry. Then, if a time window update later
                // adds a leaderboard that starts before that timestamp, existing records have to be indexed for it.
                state.transaction.mutate(MutationType.MAX, state.indexSubspace.getKey(), AtomicMutation.Standard.encodeSignedLong(latestTimestamp.get()));
            }
            return AsyncUtil.whenAll(futures);
        });
    });
}
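Everything key-related in the loop above is plain Tuple concatenation: each leaderboard contributes its own subspace key, the grouping key follows, and the chosen score key comes last, so each leaderboard's B-tree entries and ranked set live under their own prefix. The sketch below uses hypothetical leaderboard, group, and score values (and an invented secondary-subspace prefix) to show that composition with nothing but the FoundationDB Tuple and Subspace classes; no cluster is needed to run it.

import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;

public class LeaderboardKeySketch {
    public static void main(String[] args) {
        // Hypothetical values: a leaderboard's subspace key (e.g. time window type and start),
        // a grouping key, and the best contained score key for the record in that window.
        Tuple leaderboardSubspaceKey = Tuple.from(10100L, 1_500_000_000L);
        Tuple groupKey = Tuple.from("game-42");
        Tuple scoreKey = Tuple.from(9500L, 1_500_000_123L);

        // As in the loop above: entry key = leaderboard key + group key + score key.
        Tuple leaderboardGroupKey = leaderboardSubspaceKey.addAll(groupKey);
        Tuple entryKey = leaderboardGroupKey.addAll(scoreKey);

        // The ranked set for this leaderboard and group lives under the same prefix of the
        // secondary subspace, so no two leaderboards ever share a ranked set.
        Subspace extraSubspace = new Subspace(Tuple.from("my-store", "idx2", "leaderboard"));
        Subspace rankSubspace = extraSubspace.subspace(leaderboardGroupKey);

        System.out.println(entryKey);                                            // leaderboard key, then group key, then score key
        System.out.println(rankSubspace.contains(rankSubspace.pack(scoreKey)));  // true
    }
}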
Use of com.apple.foundationdb.record.provider.foundationdb.FDBIndexableRecord in the fdb-record-layer project by FoundationDB.
From the class PermutedMinMaxIndexMaintainer, method updateIndexKeys.
@Override
protected <M extends Message> CompletableFuture<Void> updateIndexKeys(@Nonnull final FDBIndexableRecord<M> savedRecord,
                                                                      final boolean remove,
                                                                      @Nonnull final List<IndexEntry> indexEntries) {
    final int groupPrefixSize = getGroupingCount();
    final int totalSize = state.index.getColumnSize();
    final Subspace permutedSubspace = getSecondarySubspace();
    for (IndexEntry indexEntry : indexEntries) {
        final Tuple groupKey = TupleHelpers.subTuple(indexEntry.getKey(), 0, groupPrefixSize);
        final Tuple value = TupleHelpers.subTuple(indexEntry.getKey(), groupPrefixSize, totalSize);
        final int permutePosition = groupPrefixSize - permutedSize;
        final Tuple groupPrefix = TupleHelpers.subTuple(groupKey, 0, permutePosition);
        final Tuple groupSuffix = TupleHelpers.subTuple(groupKey, permutePosition, groupPrefixSize);
        if (remove) {
            // First remove from the ordinary tree.
            return updateOneKeyAsync(savedRecord, remove, indexEntry).thenCompose(vignore -> {
                final byte[] permutedKeyToRemove = permutedSubspace.pack(groupPrefix.addAll(value).addAll(groupSuffix));
                // See if the removed value is the current minimum/maximum.
                return state.store.ensureContextActive().get(permutedKeyToRemove).thenCompose(permutedValueExists -> {
                    if (permutedValueExists == null) {
                        // No, nothing more to do.
                        return AsyncUtil.DONE;
                    }
                    return getExtremum(groupKey).thenApply(extremum -> {
                        if (extremum == null) {
                            // No replacement, just remove.
                            state.store.ensureContextActive().clear(permutedKeyToRemove);
                        } else {
                            final Tuple remainingValue = TupleHelpers.subTuple(extremum, groupPrefixSize, totalSize);
                            if (!value.equals(remainingValue)) {
                                // New extremum: remove the existing entry and store the new one.
                                final byte[] permutedKeyToAdd = permutedSubspace.pack(groupPrefix.addAll(remainingValue).addAll(groupSuffix));
                                final Transaction tr = state.store.ensureContextActive();
                                tr.clear(permutedKeyToRemove);
                                tr.set(permutedKeyToAdd, TupleHelpers.EMPTY.pack());
                            }
                        }
                        return null;
                    });
                });
            });
        } else {
            // Get the existing minimum/maximum.
            return getExtremum(groupKey).thenApply(extremum -> {
                final boolean addPermuted;
                if (extremum == null) {
                    // New group.
                    addPermuted = true;
                } else {
                    final Tuple currentValue = TupleHelpers.subTuple(extremum, groupPrefixSize, totalSize);
                    int compare = value.compareTo(currentValue);
                    addPermuted = type == Type.MIN ? compare < 0 : compare > 0;
                    // Replace if the new value is better.
                    if (addPermuted) {
                        final byte[] permutedKeyToRemove = permutedSubspace.pack(groupPrefix.addAll(currentValue).addAll(groupSuffix));
                        state.store.ensureContextActive().clear(permutedKeyToRemove);
                    }
                }
                if (addPermuted) {
                    final byte[] permutedKeyToAdd = permutedSubspace.pack(groupPrefix.addAll(value).addAll(groupSuffix));
                    state.store.ensureContextActive().set(permutedKeyToAdd, TupleHelpers.EMPTY.pack());
                }
                return null;
            }).thenCompose(
                    // The ordinary index update is second.
                    vignore -> updateOneKeyAsync(savedRecord, remove, indexEntry));
        }
    }
    return AsyncUtil.DONE;
}
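The part worth pausing on here is the permuted key shape: the min/max value is packed between the leading grouping columns (groupPrefix) and the trailing permuted grouping columns (groupSuffix), so within a prefix the secondary subspace is ordered by the extremum itself. Below is a hypothetical, self-contained sketch of that key arithmetic and of the MIN replacement decision, using an invented (department, team, salary) index with permutedSize = 1 and only the FoundationDB Tuple class; the real maintainer does the same slicing with TupleHelpers.subTuple and obtains the current extremum via getExtremum.

import java.util.List;

import com.apple.foundationdb.tuple.Tuple;

public class PermutedMinMaxKeySketch {
    public static void main(String[] args) {
        final int groupPrefixSize = 2;  // grouping columns: department, team
        final int totalSize = 3;        // grouping columns plus one aggregated value column
        final int permutedSize = 1;     // trailing grouping columns moved after the value

        Tuple indexEntryKey = Tuple.from("engineering", "db-team", 120_000L);
        List<Object> items = indexEntryKey.getItems();

        Tuple value = Tuple.fromList(items.subList(groupPrefixSize, totalSize));
        int permutePosition = groupPrefixSize - permutedSize;
        Tuple groupPrefix = Tuple.fromList(items.subList(0, permutePosition));
        Tuple groupSuffix = Tuple.fromList(items.subList(permutePosition, groupPrefixSize));

        // The permuted key places the value before the trailing grouping column, so within a
        // department the secondary subspace is ordered by each team's minimum salary.
        Tuple permutedKey = groupPrefix.addAll(value).addAll(groupSuffix);
        System.out.println(permutedKey); // department, then salary, then team

        // The replacement decision is a plain tuple comparison, as in the non-remove branch above.
        Tuple currentExtremum = Tuple.from(95_000L);
        boolean addPermuted = value.compareTo(currentExtremum) < 0; // MIN: replace only if strictly smaller
        System.out.println(addPermuted); // false: 120000 is not below the current minimum of 95000
    }
}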