Use of com.apple.foundationdb.record.logging.KeyValueLogMessage in project fdb-record-layer by FoundationDB.
The class IndexingThrottle, method decreaseLimit.
void decreaseLimit(@Nonnull FDBException fdbException,
                   @Nullable List<Object> additionalLogMessageKeyValues) {
    limit = Math.max(1, (3 * limit) / 4);
    if (LOGGER.isInfoEnabled()) {
        final KeyValueLogMessage message = KeyValueLogMessage.build("Lessening limit of online index build",
                LogMessageKeys.ERROR, fdbException.getMessage(),
                LogMessageKeys.ERROR_CODE, fdbException.getCode(),
                LogMessageKeys.LIMIT, limit,
                LogMessageKeys.INDEX_NAME, common.getTargetIndexesNames(),
                LogMessageKeys.INDEXER_ID, common.getUuid());
        if (additionalLogMessageKeyValues != null) {
            message.addKeysAndValues(additionalLogMessageKeyValues);
        }
        LOGGER.info(message.toString(), fdbException);
    }
}
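Every snippet on this page follows the same logging pattern: build a KeyValueLogMessage from a static message plus alternating key/value pairs, optionally append extra pairs, and log its toString(). A minimal standalone sketch of that pattern is below; the class name, logger wiring, and the example limit value are illustrative and not taken from IndexingThrottle.

import com.apple.foundationdb.record.logging.KeyValueLogMessage;
import com.apple.foundationdb.record.logging.LogMessageKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.List;

public class ThrottleLoggingSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(ThrottleLoggingSketch.class);

    // Mirrors decreaseLimit's logging: static message, alternating key/value pairs, optional extras.
    static void logLimitChange(int newLimit, List<Object> extraKeyValues) {
        if (LOGGER.isInfoEnabled()) {
            final KeyValueLogMessage message = KeyValueLogMessage.build("Lessening limit of online index build",
                    LogMessageKeys.LIMIT, newLimit);
            if (extraKeyValues != null) {
                message.addKeysAndValues(extraKeyValues);
            }
            LOGGER.info(message.toString());
        }
    }

    public static void main(String[] args) {
        logLimitChange(75, Arrays.<Object>asList(LogMessageKeys.INDEX_NAME, "my_index"));
    }
}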
Use of com.apple.foundationdb.record.logging.KeyValueLogMessage in project fdb-record-layer by FoundationDB.
The class IndexingThrottle, method throttledRunAsync.
@Nonnull
<R> CompletableFuture<R> throttledRunAsync(@Nonnull final Function<FDBRecordStore, CompletableFuture<R>> function,
                                           @Nonnull final BiFunction<R, Throwable, Pair<R, Throwable>> handlePostTransaction,
                                           @Nullable final BiConsumer<FDBException, List<Object>> handleLessenWork,
                                           @Nullable final List<Object> additionalLogMessageKeyValues) {
    List<Object> onlineIndexerLogMessageKeyValues = new ArrayList<>(Arrays.asList(
            LogMessageKeys.INDEX_NAME, common.getTargetIndexesNames(),
            LogMessageKeys.INDEXER_ID, common.getUuid()));
    if (additionalLogMessageKeyValues != null) {
        onlineIndexerLogMessageKeyValues.addAll(additionalLogMessageKeyValues);
    }
    AtomicInteger tries = new AtomicInteger(0);
    CompletableFuture<R> ret = new CompletableFuture<>();
    AtomicLong toWait = new AtomicLong(common.getRunner().getDatabase().getFactory().getInitialDelayMillis());
    AsyncUtil.whileTrue(() -> {
        loadConfig();
        return common.getRunner().runAsync(context -> common.getRecordStoreBuilder().copyBuilder().setContext(context).openAsync().thenCompose(store -> {
            for (Index index : common.getTargetIndexes()) {
                IndexState indexState = store.getIndexState(index);
                if (indexState != expectedIndexState) {
                    throw new RecordCoreStorageException("Unexpected index state",
                            LogMessageKeys.INDEX_NAME, index.getName(),
                            common.getRecordStoreBuilder().getSubspaceProvider().logKey(),
                            common.getRecordStoreBuilder().getSubspaceProvider().toString(context),
                            LogMessageKeys.INDEX_STATE, indexState,
                            LogMessageKeys.INDEX_STATE_PRECONDITION, expectedIndexState);
                }
            }
            return function.apply(store);
        }), handlePostTransaction, onlineIndexerLogMessageKeyValues).handle((value, e) -> {
            if (e == null) {
                ret.complete(value);
                return AsyncUtil.READY_FALSE;
            } else {
                int currTries = tries.getAndIncrement();
                FDBException fdbE = getFDBException(e);
                if (currTries < common.config.getMaxRetries() && fdbE != null && lessenWorkCodes.contains(fdbE.getCode())) {
                    if (handleLessenWork != null) {
                        handleLessenWork.accept(fdbE, onlineIndexerLogMessageKeyValues);
                    }
                    long delay = (long) (Math.random() * toWait.get());
                    toWait.set(Math.min(toWait.get() * 2, common.getRunner().getDatabase().getFactory().getMaxDelayMillis()));
                    if (LOGGER.isWarnEnabled()) {
                        final KeyValueLogMessage message = KeyValueLogMessage.build("Retrying Runner Exception",
                                LogMessageKeys.INDEXER_CURR_RETRY, currTries,
                                LogMessageKeys.INDEXER_MAX_RETRIES, common.config.getMaxRetries(),
                                LogMessageKeys.DELAY, delay,
                                LogMessageKeys.LIMIT, limit);
                        message.addKeysAndValues(onlineIndexerLogMessageKeyValues);
                        LOGGER.warn(message.toString(), e);
                    }
                    return MoreAsyncUtil.delayedFuture(delay, TimeUnit.MILLISECONDS).thenApply(vignore3 -> true);
                } else {
                    return completeExceptionally(ret, e, onlineIndexerLogMessageKeyValues);
                }
            }
        }).thenCompose(Function.identity());
    }, common.getRunner().getExecutor()).whenComplete((vignore, e) -> {
        if (e != null) {
            // Just update ret and ignore the returned future.
            completeExceptionally(ret, e, onlineIndexerLogMessageKeyValues);
        }
    });
    return ret;
}
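The retry branch above implements randomized exponential backoff: each retry sleeps for a random delay drawn from [0, toWait), and toWait is then doubled up to the factory's maximum. That arithmetic can be isolated as below; the initial and maximum delays are illustrative constants standing in for the values throttledRunAsync reads from the FDBDatabaseFactory.

import java.util.concurrent.atomic.AtomicLong;

public class BackoffSketch {
    // Illustrative defaults; the real values come from the FDBDatabaseFactory.
    private static final long INITIAL_DELAY_MILLIS = 10;
    private static final long MAX_DELAY_MILLIS = 1000;

    private final AtomicLong toWait = new AtomicLong(INITIAL_DELAY_MILLIS);

    // Returns the delay before the next retry and doubles the ceiling, capped at the maximum.
    long nextDelayMillis() {
        long delay = (long) (Math.random() * toWait.get());
        toWait.set(Math.min(toWait.get() * 2, MAX_DELAY_MILLIS));
        return delay;
    }

    public static void main(String[] args) {
        BackoffSketch backoff = new BackoffSketch();
        for (int attempt = 0; attempt < 5; attempt++) {
            System.out.println("attempt " + attempt + " -> wait " + backoff.nextDelayMillis() + " ms");
        }
    }
}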
Use of com.apple.foundationdb.record.logging.KeyValueLogMessage in project fdb-record-layer by FoundationDB.
The class TextIndexMaintainer, method updateOneKeyAsync.
@Nonnull
private <M extends Message> CompletableFuture<Void> updateOneKeyAsync(@Nonnull FDBIndexableRecord<M> savedRecord,
                                                                       final boolean remove,
                                                                       @Nonnull IndexEntry entry,
                                                                       int textPosition,
                                                                       int recordTokenizerVersion) {
    long startTime = System.nanoTime();
    final Tuple indexEntryKey = indexEntryKey(entry.getKey(), savedRecord.getPrimaryKey());
    final String text = indexEntryKey.getString(textPosition);
    if (text == null || text.isEmpty()) {
        // Empty or not set. Either way, there is nothing to tokenize, so just exit now.
        return AsyncUtil.DONE;
    }
    final Tuple groupingKey = (textPosition == 0) ? null : TupleHelpers.subTuple(indexEntryKey, 0, textPosition);
    final Tuple groupedKey = TupleHelpers.subTuple(indexEntryKey, textPosition + 1, indexEntryKey.size());
    final Map<String, List<Integer>> positionMap = tokenizer.tokenizeToMap(text, recordTokenizerVersion, TextTokenizer.TokenizerMode.INDEX);
    final StoreTimer.Event indexUpdateEvent = remove ? FDBStoreTimer.Events.DELETE_INDEX_ENTRY : FDBStoreTimer.Events.SAVE_INDEX_ENTRY;
    if (LOGGER.isDebugEnabled()) {
        final Pair<Integer, Integer> estimatedSize = estimateSize(groupingKey, positionMap, groupedKey);
        KeyValueLogMessage msg = KeyValueLogMessage.build("performed text tokenization",
                LogMessageKeys.REMOVE, remove,
                LogMessageKeys.TEXT_SIZE, text.length(),
                LogMessageKeys.UNIQUE_TOKENS, positionMap.size(),
                LogMessageKeys.AVG_TOKEN_SIZE, positionMap.keySet().stream().mapToInt(String::length).sum() * 1.0 / positionMap.size(),
                LogMessageKeys.MAX_TOKEN_SIZE, positionMap.keySet().stream().mapToInt(String::length).max().orElse(0),
                LogMessageKeys.AVG_POSITIONS, positionMap.values().stream().mapToInt(List::size).sum() * 1.0 / positionMap.size(),
                LogMessageKeys.MAX_POSITIONS, positionMap.values().stream().mapToInt(List::size).max().orElse(0),
                LogMessageKeys.TEXT_KEY_SIZE, estimatedSize.getKey(),
                LogMessageKeys.TEXT_VALUE_SIZE, estimatedSize.getValue(),
                LogMessageKeys.TEXT_INDEX_SIZE_AMORTIZED, estimatedSize.getKey() / 10 + estimatedSize.getValue(),
                IndexOptions.TEXT_TOKENIZER_NAME_OPTION, tokenizer.getName(),
                IndexOptions.TEXT_TOKENIZER_VERSION_OPTION, recordTokenizerVersion,
                IndexOptions.TEXT_ADD_AGGRESSIVE_CONFLICT_RANGES_OPTION, addAggressiveConflictRanges,
                LogMessageKeys.PRIMARY_KEY, savedRecord.getPrimaryKey(),
                LogMessageKeys.SUBSPACE, ByteArrayUtil2.loggable(state.store.getSubspace().getKey()),
                LogMessageKeys.INDEX_SUBSPACE, ByteArrayUtil2.loggable(state.indexSubspace.getKey()),
                LogMessageKeys.WROTE_INDEX, true);
        LOGGER.debug(msg.toString());
    }
    if (positionMap.isEmpty()) {
        if (state.store.getTimer() != null) {
            state.store.getTimer().recordSinceNanoTime(indexUpdateEvent, startTime);
        }
        return AsyncUtil.DONE;
    }
    if (addAggressiveConflictRanges) {
        // Add a read and write conflict range over the whole index to decrease the number of mutations
        // sent to the resolver. In theory, this will increase the number of conflicts in that if two
        // records with the same grouping key come in at the same time, then they will now definitely
        // conflict. However, this isn't too bad because there is already a high chance of conflict
        // in the text index because each token insert has to do a read on its own.
        final Range indexRange = groupingKey == null ? state.indexSubspace.range() : state.indexSubspace.range(groupingKey);
        state.context.ensureActive().addReadConflictRange(indexRange.begin, indexRange.end);
        state.context.ensureActive().addWriteConflictRange(indexRange.begin, indexRange.end);
    }
    final BunchedMap<Tuple, List<Integer>> bunchedMap = getBunchedMap(state.context);
    CompletableFuture<Void> tokenInsertFuture = RecordCursor.fromIterator(state.context.getExecutor(), positionMap.entrySet().iterator())
            .forEachAsync((Map.Entry<String, List<Integer>> tokenEntry) -> {
                Tuple subspaceTuple;
                if (groupingKey == null) {
                    subspaceTuple = Tuple.from(tokenEntry.getKey());
                } else {
                    subspaceTuple = groupingKey.add(tokenEntry.getKey());
                }
                Subspace mapSubspace = state.indexSubspace.subspace(subspaceTuple);
                if (remove) {
                    return bunchedMap.remove(state.transaction, mapSubspace, groupedKey).thenAccept(ignore -> { });
                } else {
                    final List<Integer> value = omitPositionLists ? Collections.emptyList() : tokenEntry.getValue();
                    return bunchedMap.put(state.transaction, mapSubspace, groupedKey, value).thenAccept(ignore -> { });
                }
            }, state.store.getPipelineSize(PipelineOperation.TEXT_INDEX_UPDATE));
    if (state.store.getTimer() != null) {
        return state.store.getTimer().instrument(indexUpdateEvent, tokenInsertFuture, state.context.getExecutor(), startTime);
    } else {
        return tokenInsertFuture;
    }
}
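The grouping logic above is positional slicing of the index entry key: elements before textPosition form the grouping key, the element at textPosition is the text to tokenize, and the remaining elements (the primary key) become the grouped key stored under each token. The standalone sketch below illustrates the split on a made-up three-element entry key; the values are hypothetical.

import com.apple.foundationdb.record.TupleHelpers;
import com.apple.foundationdb.tuple.Tuple;

public class TextKeySplitSketch {
    public static void main(String[] args) {
        // Hypothetical index entry key: (groupId, text, primaryKey), with the text field at position 1.
        final Tuple indexEntryKey = Tuple.from(42L, "the quick brown fox", 1066L);
        final int textPosition = 1;

        final Tuple groupingKey = (textPosition == 0) ? null : TupleHelpers.subTuple(indexEntryKey, 0, textPosition);
        final String text = indexEntryKey.getString(textPosition);
        final Tuple groupedKey = TupleHelpers.subTuple(indexEntryKey, textPosition + 1, indexEntryKey.size());

        System.out.println("grouping key: " + groupingKey);   // just the group id
        System.out.println("text to tokenize: " + text);      // the quick brown fox
        System.out.println("grouped key: " + groupedKey);     // just the primary key
    }
}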
Use of com.apple.foundationdb.record.logging.KeyValueLogMessage in project fdb-record-layer by FoundationDB.
The class ComparatorCursor, method logComparisonFailure.
private void logComparisonFailure(List<Object> expectedKey, List<Object> actualKey) {
    if (getTimer() != null) {
        getTimer().increment(mismatchesCounts, 1);
        // Only log comparison failure the first time we see it (per continuation)
        if (!errorLogged) {
            errorLogged = true;
            KeyValueLogMessage message = KeyValueLogMessage.build("Cursor Result Comparison Failed",
                    LogMessageKeys.EXPECTED, expectedKey,
                    LogMessageKeys.ACTUAL, actualKey,
                    LogMessageKeys.PLAN_HASH, planHashSupplier.get(),
                    LogMessageKeys.PLAN, planStringSupplier.get());
            logger.error(message.toString());
        }
    }
}
Use of com.apple.foundationdb.record.logging.KeyValueLogMessage in project fdb-record-layer by FoundationDB.
The class ComparatorCursor, method logTerminationFailure.
private void logTerminationFailure(KeyedMergeCursorState<T> referenceCursorState) {
    if (getTimer() != null) {
        getTimer().increment(mismatchesCounts, 1);
        // Only log failure the first time we see it (per continuation)
        if (!errorLogged) {
            errorLogged = true;
            KeyValueLogMessage message = KeyValueLogMessage.build("Not all cursors are exhausted",
                    LogMessageKeys.EXPECTED, isSourceExhausted(referenceCursorState),
                    LogMessageKeys.PLAN_HASH, planHashSupplier.get(),
                    LogMessageKeys.PLAN, planStringSupplier.get());
            logger.error(message.toString());
        }
    }
}
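Both ComparatorCursor helpers share the same guard: the mismatch counter is always incremented, but the detailed error line is written at most once per cursor instance (that is, once per continuation). A stripped-down sketch of that guard follows; the class name and logger wiring are illustrative, and only two of the original log keys are kept.

import com.apple.foundationdb.record.logging.KeyValueLogMessage;
import com.apple.foundationdb.record.logging.LogMessageKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogOnceSketch {
    private static final Logger logger = LoggerFactory.getLogger(LogOnceSketch.class);
    private boolean errorLogged = false;

    void reportMismatch(Object expected, Object actual) {
        // Emit the detailed error at most once per instance, mirroring the errorLogged flag above.
        if (!errorLogged) {
            errorLogged = true;
            KeyValueLogMessage message = KeyValueLogMessage.build("Cursor Result Comparison Failed",
                    LogMessageKeys.EXPECTED, expected,
                    LogMessageKeys.ACTUAL, actual);
            logger.error(message.toString());
        }
    }
}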