Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class IndicesClusterStateService, method sendFailShard.
private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) {
    try {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
        failedShardsCache.put(shardRouting.shardId(), shardRouting);
        shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state);
    } catch (Exception inner) {
        if (failure != null) {
            inner.addSuppressed(failure);
        }
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to mark shard as failed (because of [{}])", shardRouting.getIndexName(), shardRouting.getId(), message), inner);
    }
}
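For reference, here is a minimal, self-contained sketch (not taken from the Elasticsearch sources) of the logging idiom the snippets on this page share: the lambda is cast to org.apache.logging.log4j.util.Supplier so that the Logger.warn(Supplier<?>, Throwable) overload is selected instead of warn(Object, Throwable), which means the ParameterizedMessage is only built when the WARN level is actually enabled. The class name, shard id, and failure below are hypothetical placeholders.

// Minimal sketch of the lazy-logging idiom (illustrative only, not Elasticsearch code).
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazySupplierLoggingExample {
    private static final Logger logger = LogManager.getLogger(LazySupplierLoggingExample.class);

    public static void main(String[] args) {
        String shardId = "[my-index][0]"; // hypothetical value, for illustration only
        Exception failure = new RuntimeException("simulated failure");
        // The cast picks the warn(Supplier<?>, Throwable) overload, so the message
        // object is only constructed if WARN is enabled for this logger.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage(
            "[{}] marking and sending shard failed due to [{}]", shardId, "example reason"), failure);
    }
}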
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class IndicesClusterStateService, method deleteIndices.
/**
* Deletes indices (with shard data).
*
* @param event cluster change event
*/
private void deleteIndices(final ClusterChangedEvent event) {
    final ClusterState previousState = event.previousState();
    final ClusterState state = event.state();
    final String localNodeId = state.nodes().getLocalNodeId();
    assert localNodeId != null;
    for (Index index : event.indicesDeleted()) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] cleaning index, no longer part of the metadata", index);
        }
        AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
        final IndexSettings indexSettings;
        if (indexService != null) {
            indexSettings = indexService.getIndexSettings();
            indicesService.removeIndex(index, DELETED, "index no longer part of the metadata");
        } else if (previousState.metaData().hasIndex(index.getName())) {
            // The deleted index was part of the previous cluster state, but not loaded on the local node
            final IndexMetaData metaData = previousState.metaData().index(index);
            indexSettings = new IndexSettings(metaData, settings);
            indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state);
        } else {
            // asserting that the previous cluster state is not initialized/recovered.
            assert previousState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
            final IndexMetaData metaData = indicesService.verifyIndexIsDeleted(index, event.state());
            if (metaData != null) {
                indexSettings = new IndexSettings(metaData, settings);
            } else {
                indexSettings = null;
            }
        }
        if (indexSettings != null) {
            threadPool.generic().execute(new AbstractRunnable() {
                @Override
                public void onFailure(Exception e) {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
                }

                @Override
                protected void doRun() throws Exception {
                    try {
                        // we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store
                        // to the master. If we can't acquire the locks here immediately there might be a shard of this index still
                        // holding on to the lock due to a "currently canceled recovery" or so. The shard will delete itself BEFORE the
                        // lock is released so it's guaranteed to be deleted by the time we get the lock
                        indicesService.processPendingDeletes(index, indexSettings, new TimeValue(30, TimeUnit.MINUTES));
                    } catch (LockObtainFailedException exc) {
                        logger.warn("[{}] failed to lock all shards for index - timed out after 30 minutes", index);
                    } catch (InterruptedException e) {
                        logger.warn("[{}] failed to lock all shards for index - interrupted", index);
                    }
                }
            });
        }
    }
}
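The snippet above offloads the potentially blocking pending-delete work onto Elasticsearch's generic thread pool and logs failures lazily. A simplified stand-in for that pattern, using a plain JDK executor instead of threadPool.generic(); the index name and the cleanup body are hypothetical placeholders.

// Simplified sketch: run a potentially blocking cleanup on a background executor
// and log failures lazily (illustrative only, not Elasticsearch code).
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class PendingDeleteSketch {
    private static final Logger logger = LogManager.getLogger(PendingDeleteSketch.class);

    public static void main(String[] args) throws InterruptedException {
        ExecutorService generic = Executors.newSingleThreadExecutor();
        String index = "my-index"; // hypothetical index name
        generic.execute(() -> {
            try {
                // placeholder for indicesService.processPendingDeletes(index, indexSettings, timeout)
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (Exception e) {
                logger.warn((Supplier<?>) () ->
                    new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
            }
        });
        generic.shutdown();
        generic.awaitTermination(30, TimeUnit.SECONDS);
    }
}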
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class SyncedFlushService, method sendSyncRequests.
void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds, final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) {
    final CountDown countDown = new CountDown(shards.size());
    final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
    for (final ShardRouting shard : shards) {
        final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
        if (node == null) {
            logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
            results.put(shard, new ShardSyncedFlushResponse("unknown node"));
            contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            continue;
        }
        final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
        if (expectedCommitId == null) {
            logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
            results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
            contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            continue;
        }
        logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
        transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), new TransportResponseHandler<ShardSyncedFlushResponse>() {
            @Override
            public ShardSyncedFlushResponse newInstance() {
                return new ShardSyncedFlushResponse();
            }

            @Override
            public void handleResponse(ShardSyncedFlushResponse response) {
                ShardSyncedFlushResponse existing = results.put(shard, response);
                assert existing == null : "got two answers for node [" + node + "]";
                // count after the assert so we won't decrement twice in handleException
                contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            }

            @Override
            public void handleException(TransportException exp) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
                results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
                contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
    }
}
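The fan-out above relies on a count-down so the listener fires exactly once, after every shard has either responded, failed, or been skipped. A simplified sketch of that idea using only JDK types (the shard identifiers, result values, and callback are hypothetical placeholders standing in for Elasticsearch's CountDown helper and transport responses):

// Simplified sketch of the fan-out/count-down pattern: collect one result per shard in a
// concurrent map and invoke the callback exactly once, when the last result arrives.
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CountDownFanOutSketch {
    static void fanOut(List<String> shards, Consumer<Map<String, String>> listener) {
        Map<String, String> results = new ConcurrentHashMap<>();
        AtomicInteger remaining = new AtomicInteger(shards.size());
        for (String shard : shards) {
            // stand-in for transportService.sendRequest(...): record a result, then count down
            results.put(shard, "ok");
            if (remaining.decrementAndGet() == 0) {
                listener.accept(results); // fires exactly once, after the last response
            }
        }
    }

    public static void main(String[] args) {
        fanOut(List.of("shard-0", "shard-1"), results -> System.out.println("all done: " + results));
    }
}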
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class SyncedFlushService, method sendPreSyncRequests.
/**
* send presync requests to all started copies of the given shard
*/
void sendPreSyncRequests(final List<ShardRouting> shards, final ClusterState state, final ShardId shardId, final ActionListener<Map<String, Engine.CommitId>> listener) {
    final CountDown countDown = new CountDown(shards.size());
    final ConcurrentMap<String, Engine.CommitId> commitIds = ConcurrentCollections.newConcurrentMap();
    for (final ShardRouting shard : shards) {
        logger.trace("{} sending pre-synced flush request to {}", shardId, shard);
        final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
        if (node == null) {
            logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard);
            if (countDown.countDown()) {
                listener.onResponse(commitIds);
            }
            continue;
        }
        transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new TransportResponseHandler<PreSyncedFlushResponse>() {
            @Override
            public PreSyncedFlushResponse newInstance() {
                return new PreSyncedFlushResponse();
            }

            @Override
            public void handleResponse(PreSyncedFlushResponse response) {
                Engine.CommitId existing = commitIds.putIfAbsent(node.getId(), response.commitId());
                assert existing == null : "got two answers for node [" + node + "]";
                // count after the assert so we won't decrement twice in handleException
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
            }

            @Override
            public void handleException(TransportException exp) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
    }
}
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
Class PeerRecoveryTargetService, method waitForClusterState.
private void waitForClusterState(long clusterStateVersion) {
    final ClusterState clusterState = clusterService.state();
    ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, TimeValue.timeValueMinutes(5), logger, threadPool.getThreadContext());
    if (clusterState.getVersion() >= clusterStateVersion) {
        logger.trace("node has cluster state with version higher than {} (current: {})", clusterStateVersion, clusterState.getVersion());
        return;
    } else {
        logger.trace("waiting for cluster state version {} (current: {})", clusterStateVersion, clusterState.getVersion());
        final PlainActionFuture<Long> future = new PlainActionFuture<>();
        observer.waitForNextChange(new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
                future.onResponse(state.getVersion());
            }

            @Override
            public void onClusterServiceClose() {
                future.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                future.onFailure(new IllegalStateException("cluster state never updated to version " + clusterStateVersion));
            }
        }, newState -> newState.getVersion() >= clusterStateVersion);
        try {
            long currentVersion = future.get();
            logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion);
        } catch (Exception e) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed waiting for cluster state with version {} (current: {})", clusterStateVersion, clusterService.state().getVersion()), e);
            throw ExceptionsHelper.convertToRuntime(e);
        }
    }
}
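waitForClusterState blocks until an observed cluster state reaches the requested version, times out via the five-minute observer, and logs the failure lazily before rethrowing. A simplified sketch of the wait-for-version idea using a JDK CompletableFuture in place of ClusterStateObserver and PlainActionFuture (the class name and version numbers are hypothetical):

// Simplified stand-in for the wait-until-version pattern: an observer completes the future
// when a new "cluster state" version satisfies the predicate; the caller blocks with a timeout.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class WaitForVersionSketch {
    private final CompletableFuture<Long> future = new CompletableFuture<>();
    private final long requiredVersion;

    WaitForVersionSketch(long requiredVersion) {
        this.requiredVersion = requiredVersion;
    }

    // called whenever a new state version is observed (stand-in for ClusterStateObserver.Listener)
    void onNewVersion(long version) {
        if (version >= requiredVersion) {
            future.complete(version);
        }
    }

    long awaitVersion() throws Exception {
        return future.get(5, TimeUnit.MINUTES); // stand-in for the 5-minute observer timeout
    }

    public static void main(String[] args) throws Exception {
        WaitForVersionSketch waiter = new WaitForVersionSketch(42L);
        waiter.onNewVersion(41L); // ignored: below the required version
        waiter.onNewVersion(43L); // completes the future
        System.out.println("waited for version " + waiter.awaitVersion());
    }
}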