Use of com.apple.foundationdb.subspace.Subspace in the fdb-record-layer project by FoundationDB:
the StandardIndexMaintainer class, method removeUniquenessViolationsAsync.
/**
 * Remove a uniqueness violation within the database. This is used to keep track of
 * uniqueness violations that occur when an index is in write-only mode, both during
 * the build itself and by other writes. This means that the writes will succeed, but
 * it will cause a later attempt to make the index readable to fail.
 *
 * <p>This will remove the last uniqueness violation entry when removing the second
 * last entry that contains the value key.</p>
 * @param valueKey the indexed key that is (apparently) not unique
 * @param primaryKey the primary key of one record that is causing a violation
 * @return a future that is complete when the uniqueness violation is removed
 */
@Nonnull
protected CompletableFuture<Void> removeUniquenessViolationsAsync(@Nonnull Tuple valueKey, @Nonnull Tuple primaryKey) {
    final Subspace valueSubspace = state.store.indexUniquenessViolationsSubspace(state.index).subspace(valueKey);
    // Drop this record's violation entry for the value key.
    state.transaction.clear(valueSubspace.pack(primaryKey));
    // Scan at most two of the remaining entries under the value key: if exactly one
    // is left, the value is no longer in violation, so clear the whole subspace.
    final ScanProperties scanProperties = new ScanProperties(ExecuteProperties.newBuilder()
            .setReturnedRowLimit(2)
            .setIsolationLevel(IsolationLevel.SERIALIZABLE)
            .setDefaultCursorStreamingMode(CursorStreamingMode.WANT_ALL)
            .build());
    final RecordCursor<KeyValue> remainingEntries = KeyValueCursor.Builder.withSubspace(valueSubspace)
            .setContext(state.context)
            .setScanProperties(scanProperties)
            .build();
    return remainingEntries.getCount().thenAccept(remainingCount -> {
        if (remainingCount == 1) {
            state.transaction.clear(Range.startsWith(valueSubspace.pack()));
        }
    });
}
Use of com.apple.foundationdb.subspace.Subspace in the fdb-record-layer project by FoundationDB:
the StandardIndexMaintainer class, method scanUniquenessViolations.
@Override
@Nonnull
public RecordCursor<IndexEntry> scanUniquenessViolations(@Nonnull TupleRange range, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
    // All recorded uniqueness violations for this index live in a dedicated subspace.
    final Subspace violationsSubspace = state.store.indexUniquenessViolationsSubspace(state.index);
    final RecordCursor<KeyValue> rawEntries = KeyValueCursor.Builder.withSubspace(violationsSubspace)
            .setContext(state.context)
            .setRange(range)
            .setContinuation(continuation)
            .setScanProperties(scanProperties)
            .build();
    // Unpack each raw key-value pair into an IndexEntry relative to the violations subspace.
    return rawEntries.map(rawEntry -> unpackKeyValue(violationsSubspace, rawEntry));
}
Use of com.apple.foundationdb.subspace.Subspace in the fdb-record-layer project by FoundationDB:
the TextIndexMaintainer class, method updateOneKeyAsync.
/**
 * Update the text index for a single index entry of a record: tokenize the text
 * component of the entry key and insert (or remove) one bunched-map entry per
 * distinct token under the index subspace.
 *
 * @param savedRecord the record whose index entry is being written or removed
 * @param remove {@code true} to remove the token entries, {@code false} to insert them
 * @param entry the index entry whose key contains the text at {@code textPosition}
 * @param textPosition position of the text value within the index entry key
 * @param recordTokenizerVersion tokenizer version to use for this record
 * @return a future that completes when every token entry has been written or removed
 */
@Nonnull
private <M extends Message> CompletableFuture<Void> updateOneKeyAsync(@Nonnull FDBIndexableRecord<M> savedRecord, final boolean remove, @Nonnull IndexEntry entry, int textPosition, int recordTokenizerVersion) {
// Captured up front so the timer events below cover tokenization as well as the writes.
long startTime = System.nanoTime();
final Tuple indexEntryKey = indexEntryKey(entry.getKey(), savedRecord.getPrimaryKey());
final String text = indexEntryKey.getString(textPosition);
if (text == null || text.isEmpty()) {
// empty or not set. Either way, there is nothing to tokenize, so just exit now.
return AsyncUtil.DONE;
}
// Key components before the text form the grouping key (null when the text is first);
// components after the text form the suffix used as the bunched-map key.
final Tuple groupingKey = (textPosition == 0) ? null : TupleHelpers.subTuple(indexEntryKey, 0, textPosition);
final Tuple groupedKey = TupleHelpers.subTuple(indexEntryKey, textPosition + 1, indexEntryKey.size());
// Map from each distinct token to the list of its positions within the text.
final Map<String, List<Integer>> positionMap = tokenizer.tokenizeToMap(text, recordTokenizerVersion, TextTokenizer.TokenizerMode.INDEX);
final StoreTimer.Event indexUpdateEvent = remove ? FDBStoreTimer.Events.DELETE_INDEX_ENTRY : FDBStoreTimer.Events.SAVE_INDEX_ENTRY;
if (LOGGER.isDebugEnabled()) {
// Debug-only statistics about the tokenization (token counts, sizes, estimated index size).
final Pair<Integer, Integer> estimatedSize = estimateSize(groupingKey, positionMap, groupedKey);
KeyValueLogMessage msg = KeyValueLogMessage.build("performed text tokenization", LogMessageKeys.REMOVE, remove, LogMessageKeys.TEXT_SIZE, text.length(), LogMessageKeys.UNIQUE_TOKENS, positionMap.size(), LogMessageKeys.AVG_TOKEN_SIZE, positionMap.keySet().stream().mapToInt(String::length).sum() * 1.0 / positionMap.size(), LogMessageKeys.MAX_TOKEN_SIZE, positionMap.keySet().stream().mapToInt(String::length).max().orElse(0), LogMessageKeys.AVG_POSITIONS, positionMap.values().stream().mapToInt(List::size).sum() * 1.0 / positionMap.size(), LogMessageKeys.MAX_POSITIONS, positionMap.values().stream().mapToInt(List::size).max().orElse(0), LogMessageKeys.TEXT_KEY_SIZE, estimatedSize.getKey(), LogMessageKeys.TEXT_VALUE_SIZE, estimatedSize.getValue(), LogMessageKeys.TEXT_INDEX_SIZE_AMORTIZED, estimatedSize.getKey() / 10 + estimatedSize.getValue(), IndexOptions.TEXT_TOKENIZER_NAME_OPTION, tokenizer.getName(), IndexOptions.TEXT_TOKENIZER_VERSION_OPTION, recordTokenizerVersion, IndexOptions.TEXT_ADD_AGGRESSIVE_CONFLICT_RANGES_OPTION, addAggressiveConflictRanges, LogMessageKeys.PRIMARY_KEY, savedRecord.getPrimaryKey(), LogMessageKeys.SUBSPACE, ByteArrayUtil2.loggable(state.store.getSubspace().getKey()), LogMessageKeys.INDEX_SUBSPACE, ByteArrayUtil2.loggable(state.indexSubspace.getKey()), LogMessageKeys.WROTE_INDEX, true);
LOGGER.debug(msg.toString());
}
if (positionMap.isEmpty()) {
// Tokenizer produced nothing: record the elapsed time and finish without any writes.
if (state.store.getTimer() != null) {
state.store.getTimer().recordSinceNanoTime(indexUpdateEvent, startTime);
}
return AsyncUtil.DONE;
}
if (addAggressiveConflictRanges) {
// Add a read and write conflict range over the whole index to decrease the number of mutations
// sent to the resolver. In theory, this will increase the number of conflicts in that if two
// records with the same grouping key come in at the same time, then they will now definitely
// conflict. However, this isn't too bad because there is already a high chance of conflict
// in the text index because each token insert has to do a read on its own.
final Range indexRange = groupingKey == null ? state.indexSubspace.range() : state.indexSubspace.range(groupingKey);
state.context.ensureActive().addReadConflictRange(indexRange.begin, indexRange.end);
state.context.ensureActive().addWriteConflictRange(indexRange.begin, indexRange.end);
}
final BunchedMap<Tuple, List<Integer>> bunchedMap = getBunchedMap(state.context);
// For each distinct token, put (or remove) one bunched-map entry keyed by the grouped
// suffix; concurrency is bounded by the TEXT_INDEX_UPDATE pipeline size.
CompletableFuture<Void> tokenInsertFuture = RecordCursor.fromIterator(state.context.getExecutor(), positionMap.entrySet().iterator()).forEachAsync((Map.Entry<String, List<Integer>> tokenEntry) -> {
Tuple subspaceTuple;
if (groupingKey == null) {
subspaceTuple = Tuple.from(tokenEntry.getKey());
} else {
subspaceTuple = groupingKey.add(tokenEntry.getKey());
}
Subspace mapSubspace = state.indexSubspace.subspace(subspaceTuple);
if (remove) {
return bunchedMap.remove(state.transaction, mapSubspace, groupedKey).thenAccept(ignore -> {
});
} else {
// When position lists are omitted, store an empty list as the value instead.
final List<Integer> value = omitPositionLists ? Collections.emptyList() : tokenEntry.getValue();
return bunchedMap.put(state.transaction, mapSubspace, groupedKey, value).thenAccept(ignore -> {
});
}
}, state.store.getPipelineSize(PipelineOperation.TEXT_INDEX_UPDATE));
if (state.store.getTimer() != null) {
// Attribute the full duration (tokenization + writes) to the save/delete event.
return state.store.getTimer().instrument(indexUpdateEvent, tokenInsertFuture, state.context.getExecutor(), startTime);
} else {
return tokenInsertFuture;
}
}
Use of com.apple.foundationdb.subspace.Subspace in the fdb-record-layer project by FoundationDB:
the PermutedMinMaxIndexMaintainer class, method deleteWhere.
@Override
public CompletableFuture<Void> deleteWhere(Transaction tr, @Nonnull Tuple prefix) {
    // After the base index data under the prefix is deleted, also clear the
    // corresponding entries in the permuted (secondary) subspace.
    return super.deleteWhere(tr, prefix).thenApply(ignored -> {
        final Subspace secondary = getSecondarySubspace();
        tr.clear(secondary.subspace(prefix).range());
        return ignored;
    });
}
Use of com.apple.foundationdb.subspace.Subspace in the fdb-record-layer project by FoundationDB:
the RankIndexMaintainer class, method updateIndexKeys.
/**
 * Update both halves of the rank index for the given index entries: the ordinary
 * B-tree index keyed by score, and the ranked-set structure kept in the secondary
 * subspace.
 *
 * @param savedRecord the record being saved or deleted
 * @param remove {@code true} to remove the entries, {@code false} to add them
 * @param indexEntries the index entries computed for the record
 * @param <M> type of the underlying record message
 * @return a future that completes when both the ordinary and ranked-set updates are done
 */
@Override
protected <M extends Message> CompletableFuture<Void> updateIndexKeys(@Nonnull final FDBIndexableRecord<M> savedRecord, final boolean remove, @Nonnull final List<IndexEntry> indexEntries) {
final int groupPrefixSize = getGroupingCount();
// The ranked-set data lives in the secondary subspace, apart from the B-tree entries.
final Subspace extraSubspace = getSecondarySubspace();
final List<CompletableFuture<Void>> ordinaryIndexFutures = new ArrayList<>(indexEntries.size());
// One future chain per rank subspace, so updates to the same ranked set are serialized.
final Map<Subspace, CompletableFuture<Void>> rankFutures = Maps.newHashMapWithExpectedSize(indexEntries.size());
for (IndexEntry indexEntry : indexEntries) {
// Maintain an ordinary B-tree index by score.
CompletableFuture<Void> updateOrdinaryIndex = updateOneKeyAsync(savedRecord, remove, indexEntry);
if (!MoreAsyncUtil.isCompletedNormally(updateOrdinaryIndex)) {
ordinaryIndexFutures.add(updateOrdinaryIndex);
}
// Split the entry key into the grouping prefix (selects the ranked set) and the score.
final Subspace rankSubspace;
final Tuple scoreKey;
if (groupPrefixSize > 0) {
final List<Object> keyValues = indexEntry.getKey().getItems();
rankSubspace = extraSubspace.subspace(Tuple.fromList(keyValues.subList(0, groupPrefixSize)));
scoreKey = Tuple.fromList(keyValues.subList(groupPrefixSize, keyValues.size()));
} else {
// No grouping: a single ranked set holds all scores.
rankSubspace = extraSubspace;
scoreKey = indexEntry.getKey();
}
// It is unsafe to have two concurrent updates to the same ranked set, so ensure that at most
// one update per grouping key is ongoing at any given time
final Function<Void, CompletableFuture<Void>> futureSupplier = vignore -> RankedSetIndexHelper.updateRankedSet(state, rankSubspace, config, indexEntry.getKey(), scoreKey, remove);
CompletableFuture<Void> existingFuture = rankFutures.get(rankSubspace);
if (existingFuture == null) {
// First update for this ranked set: start it immediately.
rankFutures.put(rankSubspace, futureSupplier.apply(null));
} else {
// Chain behind the in-flight update for the same ranked set.
rankFutures.put(rankSubspace, existingFuture.thenCompose(futureSupplier));
}
}
return CompletableFuture.allOf(AsyncUtil.whenAll(ordinaryIndexFutures), AsyncUtil.whenAll(rankFutures.values()));
}
Aggregations