Use of org.apache.logging.log4j.message.ParameterizedMessage in project logging-log4j2 by apache.
Class LoggerSupplierTest, method flowTracing_SupplierOfParameterizedMessage.
@Test
public void flowTracing_SupplierOfParameterizedMessage() {
    logger.traceEntry(new Supplier<ParameterizedMessage>() {

        @Override
        public ParameterizedMessage get() {
            return new ParameterizedMessage("int foo={}", 1234567890);
        }
    });
    assertEquals(1, results.size());
    assertThat("Incorrect Entry", results.get(0), startsWith("ENTER[ FLOW ] TRACE Enter"));
    assertThat("Missing entry data", results.get(0), containsString("(int foo=1234567890)"));
    assertThat("Bad toString()", results.get(0), not(containsString("ParameterizedMessage")));
}
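The assertions rely on the flow-tracing output containing the formatted message text rather than the message object's toString(). As an illustration (not part of the original test), the deferred formatting of ParameterizedMessage can be seen directly:

// Illustrative sketch: the pattern and arguments are stored as-is, and the text is
// produced only when it is requested, e.g. when the event is actually logged.
ParameterizedMessage msg = new ParameterizedMessage("int foo={}", 1234567890);
String text = msg.getFormattedMessage(); // "int foo=1234567890"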
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
Class ClientScrollableHitSource, method clearScroll.
@Override
public void clearScroll(String scrollId, Runnable onCompletion) {
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.addScrollId(scrollId);
    /*
     * Unwrap the client so we don't set our task as the parent. If we *did* set our task's ID, the clear scroll would be
     * cancelled whenever this task is cancelled, but we want to clear the scroll regardless of whether or not the main
     * request was cancelled.
     */
    client.unwrap().clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {

        @Override
        public void onResponse(ClearScrollResponse response) {
            logger.debug("Freed [{}] contexts", response.getNumFreed());
            onCompletion.run();
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
            onCompletion.run();
        }
    });
}
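The cast to Supplier<?> in onFailure is what selects the lazy overload of Logger.warn: log4j2's Logger declares warn overloads for both org.apache.logging.log4j.util.Supplier and MessageSupplier, so a bare lambda would otherwise be ambiguous. A minimal, self-contained sketch of the same pattern; the class and variable names here are illustrative, not from the Elasticsearch source:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyWarnSketch {

    private static final Logger logger = LogManager.getLogger(LazyWarnSketch.class);

    void reportClearScrollFailure(String scrollId, Exception e) {
        // The ParameterizedMessage is constructed only if WARN is enabled for this logger;
        // the cast picks the Supplier<?> overload of Logger.warn over the MessageSupplier one.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
    }
}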
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
Class ClientScrollableHitSource, method searchWithRetry.
/**
 * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
 * rejected execution.
 *
 * @param action     consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
 * @param onResponse consumes the response from the action
 */
private void searchWithRetry(Consumer<ActionListener<SearchResponse>> action, Consumer<SearchResponse> onResponse) {
    /*
     * RetryHelper is both an AbstractRunnable and an ActionListener<SearchResponse> - meaning that it both starts the search and
     * reacts to the results. The complexity is all in onFailure, which either adapts the failure to the "fail" listener or
     * retries the search. Since both AbstractRunnable and ActionListener define the onFailure method, it is called both when the
     * action fails to run (either while running or before starting) and when the action responds with a failure.
     */
    class RetryHelper extends AbstractRunnable implements ActionListener<SearchResponse> {
        private final Iterator<TimeValue> retries = backoffPolicy.iterator();
        /**
         * The runnable to run that retries in the same context as the original call.
         */
        private Runnable retryWithContext;
        private volatile int retryCount = 0;

        @Override
        protected void doRun() throws Exception {
            action.accept(this);
        }

        @Override
        public void onResponse(SearchResponse response) {
            onResponse.accept(response);
        }

        @Override
        public void onFailure(Exception e) {
            if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
                if (retries.hasNext()) {
                    retryCount += 1;
                    TimeValue delay = retries.next();
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
                    countSearchRetry.run();
                    threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext);
                } else {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                            "giving up on search because we retried [{}] times without success", retryCount), e);
                    fail.accept(e);
                }
            } else {
                logger.warn("giving up on search because it failed with a non-retryable exception", e);
                fail.accept(e);
            }
        }
    }
    RetryHelper helper = new RetryHelper();
    // Wrap the helper in a runnable that preserves the current context so we keep it on retry.
    helper.retryWithContext = threadPool.getThreadContext().preserveContext(helper);
    helper.run();
}
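The structure the comment describes - one object that both starts the call and handles its outcome, retrying after a delay drawn from a backoff iterator - is independent of Elasticsearch. Below is a minimal, framework-free sketch of the same shape using only the JDK; every name in it is illustrative, not taken from the Elasticsearch code:

import java.time.Duration;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

class RetrySketch {

    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    <T> void withRetry(Consumer<Listener<T>> action, Consumer<T> onResponse, Consumer<Exception> fail) {
        // Fixed backoff schedule; the real code draws these delays from a BackoffPolicy.
        Iterator<Duration> retries = List.of(Duration.ofMillis(50), Duration.ofMillis(500), Duration.ofSeconds(5)).iterator();
        Listener<T> listener = new Listener<T>() {
            @Override
            public void onResponse(T response) {
                onResponse.accept(response);
            }

            @Override
            public void onFailure(Exception e) {
                if (isRetryable(e) && retries.hasNext()) {
                    // Re-run the action later, reusing this listener so the next failure lands here again.
                    scheduler.schedule(() -> action.accept(this), retries.next().toMillis(), TimeUnit.MILLISECONDS);
                } else {
                    fail.accept(e);
                }
            }
        };
        action.accept(listener);
    }

    private boolean isRetryable(Exception e) {
        // Stand-in for ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null.
        return e instanceof RejectedExecutionException;
    }
}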
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
Class IndicesService, method removeIndex.
@Override
public void removeIndex(final Index index, final IndexRemovalReason reason, final String extraInfo) {
    final String indexName = index.getName();
    try {
        final IndexService indexService;
        final IndexEventListener listener;
        synchronized (this) {
            if (hasIndex(index) == false) {
                return;
            }
            logger.debug("[{}] closing ... (reason [{}])", indexName, reason);
            Map<String, IndexService> newIndices = new HashMap<>(indices);
            indexService = newIndices.remove(index.getUUID());
            assert indexService != null : "IndexService is null for index: " + index;
            indices = unmodifiableMap(newIndices);
            listener = indexService.getIndexEventListener();
        }
        listener.beforeIndexRemoved(indexService, reason);
        logger.debug("{} closing index service (reason [{}][{}])", index, reason, extraInfo);
        indexService.close(extraInfo, reason == IndexRemovalReason.DELETED);
        logger.debug("{} closed... (reason [{}][{}])", index, reason, extraInfo);
        final IndexSettings indexSettings = indexService.getIndexSettings();
        listener.afterIndexRemoved(indexService.index(), indexSettings, reason);
        if (reason == IndexRemovalReason.DELETED) {
            // now we are done - try to wipe data on disk if possible
            deleteIndexStore(extraInfo, indexService.index(), indexSettings);
        }
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to remove index {} ([{}][{}])", index, reason, extraInfo), e);
    }
}
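removeIndex uses the same deferred-construction idiom as the earlier examples. For comparison only (not from the Elasticsearch source, reusing the variables of the catch block above), log4j2 also accepts a ParameterizedMessage eagerly through the Message overload of warn; the Supplier form just postpones the allocation until the WARN event is known to be logged:

// Eager: the ParameterizedMessage is allocated even if WARN is disabled.
logger.warn(new ParameterizedMessage("failed to remove index {} ([{}][{}])", index, reason, extraInfo), e);

// Deferred: the Supplier runs only if the WARN event is actually logged.
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to remove index {} ([{}][{}])", index, reason, extraInfo), e);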
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
Class IndicesService, method stats.
public NodeIndicesStats stats(boolean includePrevious, CommonStatsFlags flags) {
    CommonStats oldStats = new CommonStats(flags);
    if (includePrevious) {
        Flag[] setFlags = flags.getFlags();
        for (Flag flag : setFlags) {
            switch (flag) {
                case Get:
                    oldStats.get.add(oldShardsStats.getStats);
                    break;
                case Indexing:
                    oldStats.indexing.add(oldShardsStats.indexingStats);
                    break;
                case Search:
                    oldStats.search.add(oldShardsStats.searchStats);
                    break;
                case Merge:
                    oldStats.merge.add(oldShardsStats.mergeStats);
                    break;
                case Refresh:
                    oldStats.refresh.add(oldShardsStats.refreshStats);
                    break;
                case Recovery:
                    oldStats.recoveryStats.add(oldShardsStats.recoveryStats);
                    break;
                case Flush:
                    oldStats.flush.add(oldShardsStats.flushStats);
                    break;
            }
        }
    }
    Map<Index, List<IndexShardStats>> statsByShard = new HashMap<>();
    for (IndexService indexService : this) {
        for (IndexShard indexShard : indexService) {
            try {
                if (indexShard.routingEntry() == null) {
                    continue;
                }
                IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] {
                        new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
                                new CommonStats(indicesQueryCache, indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats()) });
                if (!statsByShard.containsKey(indexService.index())) {
                    statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats));
                } else {
                    statsByShard.get(indexService.index()).add(indexShardStats);
                }
            } catch (IllegalIndexShardStateException e) {
                // we can safely ignore illegal state on ones that are closing for example
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e);
            }
        }
    }
    return new NodeIndicesStats(oldStats, statsByShard);
}
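As an aside (not part of the original code), the containsKey/put pairing that grows statsByShard could be expressed with Map.computeIfAbsent, which keeps the per-index list creation in one expression; the behavior should be equivalent:

statsByShard.computeIfAbsent(indexService.index(), k -> new ArrayList<>()).add(indexShardStats);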