Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class RecoveryTarget, method closeInternal().
@Override
protected void closeInternal() {
    try {
        // clean open index outputs
        Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, IndexOutput> entry = iterator.next();
            logger.trace("closing IndexOutput file [{}]", entry.getValue());
            try {
                entry.getValue().close();
            } catch (Exception e) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
            }
            iterator.remove();
        }
        // trash temporary files
        for (String file : tempFileNames.keySet()) {
            logger.trace("cleaning temporary file [{}]", file);
            store.deleteQuiet(file);
        }
    } finally {
        // free store. increment happens in constructor
        store.decRef();
        indexShard.recoveryStats().decCurrentAsTarget();
        closedLatch.countDown();
    }
}
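The snippet above shows the log4j 2 idiom used throughout this codebase: the throwable is passed as the last argument, and the message is wrapped in a Supplier so the ParameterizedMessage is only built if the DEBUG level is actually enabled. Below is a minimal, self-contained sketch of the same idiom; the class and method names (CloseUtil, closeQuietly) are illustrative and not part of Elasticsearch.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class CloseUtil {
    private static final Logger logger = LogManager.getLogger(CloseUtil.class);

    // Close a resource, logging (but not rethrowing) any failure.
    static void closeQuietly(String name, AutoCloseable resource) {
        try {
            resource.close();
        } catch (Exception e) {
            // The Supplier cast defers building the ParameterizedMessage until
            // DEBUG is enabled for this logger; the exception is passed separately
            // so its stack trace is still attached to the log entry.
            logger.debug((Supplier<?>) () -> new ParameterizedMessage(
                    "error while closing resource [{}]", name), e);
        }
    }
}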
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class ElasticsearchUncaughtExceptionHandler, method onNonFatalUncaught().
// visible for testing
void onNonFatalUncaught(final String threadName, final Throwable t) {
    final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
}
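The explicit cast to org.apache.logging.log4j.util.Supplier<?> selects the warn(Supplier<?>, Throwable) overload rather than warn(Object, Throwable). When lazy construction is not needed, the Message can be passed directly to the warn(Message, Throwable) overload. A minimal sketch of that eager variant, with an illustrative class name (UncaughtLoggingSketch):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

class UncaughtLoggingSketch {
    private static final Logger logger = LogManager.getLogger(UncaughtLoggingSketch.class);

    // Eager variant: the ParameterizedMessage is built unconditionally and passed
    // to warn(Message, Throwable), so no Supplier cast is needed.
    static void logUncaught(String threadName, Throwable t) {
        logger.warn(new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
    }
}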
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class TransportShardMultiTermsVectorAction, method shardOperation().
@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
    final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    final IndexShard indexShard = indexService.getShard(shardId.id());
    for (int i = 0; i < request.locations.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        try {
            TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
            response.add(request.locations.get(i), termVectorsResponse);
        } catch (Exception t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
            }
        }
    }
    return response;
}
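One detail worth noting in the loop above: the lambda passed to logger.debug can only capture effectively final locals, so the per-iteration request is copied into termVectorsRequest rather than being read through the mutating index i inside the lambda. A short standalone sketch of that constraint, with illustrative names (CaptureSketch, items, handle):

import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class CaptureSketch {
    private static final Logger logger = LogManager.getLogger(CaptureSketch.class);

    static void process(List<String> items) {
        for (int i = 0; i < items.size(); i++) {
            final String item = items.get(i); // effectively final copy the lambda may capture
            try {
                handle(item);
            } catch (Exception e) {
                // Reading i directly inside the lambda would not compile: i is reassigned by the loop.
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to handle [{}]", item), e);
            }
        }
    }

    static void handle(String item) { /* illustrative placeholder */ }
}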
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class ReplicationOperation, method performOnReplica().
private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) {
    if (logger.isTraceEnabled()) {
        logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest);
    }
    totalShards.incrementAndGet();
    pendingActions.incrementAndGet();
    replicasProxy.performOn(shard, replicaRequest, new ActionListener<ReplicaResponse>() {

        @Override
        public void onResponse(ReplicaResponse response) {
            successfulShards.incrementAndGet();
            primary.updateLocalCheckpointForShard(response.allocationId(), response.localCheckpoint());
            decPendingAndFinishIfNeeded();
        }

        @Override
        public void onFailure(Exception replicaException) {
            logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("[{}] failure while performing [{}] on replica {}, request [{}]", shard.shardId(), opType, shard, replicaRequest), replicaException);
            if (TransportActions.isShardNotAvailableException(replicaException)) {
                decPendingAndFinishIfNeeded();
            } else {
                RestStatus restStatus = ExceptionsHelper.status(replicaException);
                shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
                String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
                replicasProxy.failShardIfNeeded(shard, replicaRequest.primaryTerm(), message, replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded());
            }
        }
    });
}
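ParameterizedMessage substitutes each {} placeholder with the corresponding argument in order, which is why the trace call above can mix shard ids, the operation type, and the request object in a single pattern. A small standalone sketch of that substitution; the argument values here are made up for illustration:

import org.apache.logging.log4j.message.ParameterizedMessage;

class FormatSketch {
    public static void main(String[] args) {
        ParameterizedMessage message = new ParameterizedMessage(
                "[{}] failure while performing [{}] on replica {}, request [{}]",
                "[index-a][0]", "indexing", "node-1", "req-42");
        // Each {} is replaced by the matching argument, in order.
        System.out.println(message.getFormattedMessage());
        // prints: [[index-a][0]] failure while performing [indexing] on replica node-1, request [req-42]
    }
}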
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class RepositoriesService, method applyClusterState().
/**
 * Checks if new repositories appeared in or disappeared from cluster metadata and updates current list of
 * repositories accordingly.
 *
 * @param event cluster changed event
 */
@Override
public void applyClusterState(ClusterChangedEvent event) {
    try {
        RepositoriesMetaData oldMetaData = event.previousState().getMetaData().custom(RepositoriesMetaData.TYPE);
        RepositoriesMetaData newMetaData = event.state().getMetaData().custom(RepositoriesMetaData.TYPE);
        // Check if repositories got changed
        if ((oldMetaData == null && newMetaData == null) || (oldMetaData != null && oldMetaData.equals(newMetaData))) {
            return;
        }
        logger.trace("processing new index repositories for state version [{}]", event.state().version());
        Map<String, Repository> survivors = new HashMap<>();
        // First, remove repositories that are no longer there
        for (Map.Entry<String, Repository> entry : repositories.entrySet()) {
            if (newMetaData == null || newMetaData.repository(entry.getKey()) == null) {
                logger.debug("unregistering repository [{}]", entry.getKey());
                closeRepository(entry.getValue());
            } else {
                survivors.put(entry.getKey(), entry.getValue());
            }
        }
        Map<String, Repository> builder = new HashMap<>();
        if (newMetaData != null) {
            // Now go through all repositories and update existing or create missing
            for (RepositoryMetaData repositoryMetaData : newMetaData.repositories()) {
                Repository repository = survivors.get(repositoryMetaData.name());
                if (repository != null) {
                    // Found previous version of this repository
                    RepositoryMetaData previousMetadata = repository.getMetadata();
                    if (previousMetadata.type().equals(repositoryMetaData.type()) == false || previousMetadata.settings().equals(repositoryMetaData.settings()) == false) {
                        // Previous version is different from the version in settings
                        logger.debug("updating repository [{}]", repositoryMetaData.name());
                        closeRepository(repository);
                        repository = null;
                        try {
                            repository = createRepository(repositoryMetaData);
                        } catch (RepositoryException ex) {
                            // TODO: this catch is bogus, it means the old repo is already closed,
                            // but we have nothing to replace it
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
                        }
                    }
                } else {
                    try {
                        repository = createRepository(repositoryMetaData);
                    } catch (RepositoryException ex) {
                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
                    }
                }
                if (repository != null) {
                    logger.debug("registering repository [{}]", repositoryMetaData.name());
                    builder.put(repositoryMetaData.name(), repository);
                }
            }
        }
        repositories = Collections.unmodifiableMap(builder);
    } catch (Exception ex) {
        logger.warn("failure updating cluster state ", ex);
    }
}
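The method above follows a reconcile pattern: keep repositories that still appear in the new metadata, close the ones that disappeared, recreate the ones whose type or settings changed, and finally publish an immutable snapshot. A simplified sketch of the keep/close half of that pattern, with illustrative names (ReconcileSketch, live, desired) and plain AutoCloseable standing in for Repository:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class ReconcileSketch {
    // Returns the next registry: survivors are carried over, removed entries are closed.
    static Map<String, AutoCloseable> reconcile(Map<String, AutoCloseable> live, Set<String> desired) {
        Map<String, AutoCloseable> next = new HashMap<>();
        for (Map.Entry<String, AutoCloseable> entry : live.entrySet()) {
            if (desired.contains(entry.getKey())) {
                next.put(entry.getKey(), entry.getValue()); // still configured: keep it
            } else {
                try {
                    entry.getValue().close(); // no longer configured: release it
                } catch (Exception e) {
                    // swallow, mirroring the "close quietly" behavior in the snippets above
                }
            }
        }
        // Publish an immutable snapshot, as applyClusterState does with Collections.unmodifiableMap.
        return Collections.unmodifiableMap(next);
    }
}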