Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class SyncedFlushService, method sendSyncRequests:
void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds, final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) {
    final CountDown countDown = new CountDown(shards.size());
    final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
    for (final ShardRouting shard : shards) {
        final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
        if (node == null) {
            logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
            results.put(shard, new ShardSyncedFlushResponse("unknown node"));
            countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            continue;
        }
        final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
        if (expectedCommitId == null) {
            logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
            results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
            countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            continue;
        }
        logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
        transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), new TransportResponseHandler<ShardSyncedFlushResponse>() {

            @Override
            public ShardSyncedFlushResponse newInstance() {
                return new ShardSyncedFlushResponse();
            }

            @Override
            public void handleResponse(ShardSyncedFlushResponse response) {
                ShardSyncedFlushResponse existing = results.put(shard, response);
                assert existing == null : "got two answers for node [" + node + "]";
                // count after the assert so we won't decrement twice in handleException
                countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            }

            @Override
            public void handleException(TransportException exp) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
                results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
                countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
    }
}
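The countDownAndSendResponseIfDone helper invoked in every branch above is not shown on this page. As a rough illustration only — all names below are invented for the sketch, not Elasticsearch API — this is the pattern it presumably implements: record each shard's result and fire the listener exactly once, when the last response or failure arrives.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

// Hypothetical sketch: aggregate per-shard results and invoke the callback
// exactly once, when the last outstanding response (or failure) arrives.
public class ResponseAggregator<K, V> {

    private final AtomicInteger remaining;
    private final Map<K, V> results = new ConcurrentHashMap<>();
    private final Consumer<Map<K, V>> listener;

    public ResponseAggregator(int expectedResponses, Consumer<Map<K, V>> listener) {
        this.remaining = new AtomicInteger(expectedResponses);
        this.listener = listener;
    }

    // Called from success and failure paths alike, mirroring how
    // countDownAndSendResponseIfDone is invoked in every branch above.
    public void onResult(K key, V value) {
        results.put(key, value);
        if (remaining.decrementAndGet() == 0) {
            listener.accept(results); // fires exactly once
        }
    }
}

Calling the same method from every branch is what keeps the countdown balanced: each shard decrements it exactly once, so the listener can neither fire early nor fire twice.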
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class SyncedFlushService, method sendPreSyncRequests:
/**
 * Sends pre-sync flush requests to all started copies of the given shard.
 */
void sendPreSyncRequests(final List<ShardRouting> shards, final ClusterState state, final ShardId shardId, final ActionListener<Map<String, Engine.CommitId>> listener) {
    final CountDown countDown = new CountDown(shards.size());
    final ConcurrentMap<String, Engine.CommitId> commitIds = ConcurrentCollections.newConcurrentMap();
    for (final ShardRouting shard : shards) {
        logger.trace("{} sending pre-synced flush request to {}", shardId, shard);
        final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
        if (node == null) {
            logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard);
            if (countDown.countDown()) {
                listener.onResponse(commitIds);
            }
            continue;
        }
        transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new TransportResponseHandler<PreSyncedFlushResponse>() {

            @Override
            public PreSyncedFlushResponse newInstance() {
                return new PreSyncedFlushResponse();
            }

            @Override
            public void handleResponse(PreSyncedFlushResponse response) {
                Engine.CommitId existing = commitIds.putIfAbsent(node.getId(), response.commitId());
                assert existing == null : "got two answers for node [" + node + "]";
                // count after the assert so we won't decrement twice in handleException
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
            }

            @Override
            public void handleException(TransportException exp) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
    }
}
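Both handleException methods above pass a Supplier so that log4j2 builds the ParameterizedMessage only when TRACE is actually enabled, keeping the formatting cost off the hot path. A minimal, self-contained illustration of that idiom — the logger name and message arguments are invented for the demo:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyTraceDemo {

    private static final Logger logger = LogManager.getLogger(LazyTraceDemo.class);

    public static void main(String[] args) {
        Exception cause = new RuntimeException("simulated transport failure");
        // The lambda defers message construction: if TRACE is disabled,
        // the ParameterizedMessage (and its string formatting) is never created.
        logger.trace((Supplier<?>) () -> new ParameterizedMessage(
            "[{}] error while performing pre synced flush on [{}], skipping", "index-0", "shard-1"), cause);
    }
}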
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class PeerRecoveryTargetService, method waitForClusterState:
private void waitForClusterState(long clusterStateVersion) {
    final ClusterState clusterState = clusterService.state();
    ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, TimeValue.timeValueMinutes(5), logger, threadPool.getThreadContext());
    if (clusterState.getVersion() >= clusterStateVersion) {
        logger.trace("node has cluster state with version higher than {} (current: {})", clusterStateVersion, clusterState.getVersion());
        return;
    } else {
        logger.trace("waiting for cluster state version {} (current: {})", clusterStateVersion, clusterState.getVersion());
        final PlainActionFuture<Long> future = new PlainActionFuture<>();
        observer.waitForNextChange(new ClusterStateObserver.Listener() {

            @Override
            public void onNewClusterState(ClusterState state) {
                future.onResponse(state.getVersion());
            }

            @Override
            public void onClusterServiceClose() {
                future.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                future.onFailure(new IllegalStateException("cluster state never updated to version " + clusterStateVersion));
            }
        }, newState -> newState.getVersion() >= clusterStateVersion);
        try {
            long currentVersion = future.get();
            logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion);
        } catch (Exception e) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed waiting for cluster state with version {} (current: {})", clusterStateVersion, clusterService.state().getVersion()), e);
            throw ExceptionsHelper.convertToRuntime(e);
        }
    }
}
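PlainActionFuture is what turns the callback-style ClusterStateObserver into a blocking wait here. A rough sketch of the same bridge using only the JDK's CompletableFuture; the observer interface is a stand-in for illustration, not the Elasticsearch API, and the sketch assumes the wanted version eventually arrives:

import java.util.concurrent.CompletableFuture;
import java.util.function.LongConsumer;

public class BlockingWaitSketch {

    // Stand-in for an async source that reports new versions to a callback.
    interface VersionObserver {
        void onNextVersion(LongConsumer callback);
    }

    static long waitForVersion(VersionObserver observer, long wanted) {
        CompletableFuture<Long> future = new CompletableFuture<>();
        // The callback completes the future, exactly like onNewClusterState
        // calling future.onResponse(state.getVersion()) above.
        observer.onNextVersion(version -> {
            if (version >= wanted) {
                future.complete(version);
            }
        });
        // join() blocks the caller until the async side delivers a result,
        // mirroring the future.get() call above (simplified: no timeout path).
        return future.join();
    }
}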
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class Store, method renameTempFilesSafe:
/**
* Renames all the given files from the key of the map to the
* value of the map. All successfully renamed files are removed from the map in-place.
*/
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {
    // this works just like a lucene commit - we rename all temp files and once we successfully
    // renamed all the segments we rename the commit to ensure we don't leave half baked commits behind.
    final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[tempFileMap.size()]);
    ArrayUtil.timSort(entries, new Comparator<Map.Entry<String, String>>() {

        @Override
        public int compare(Map.Entry<String, String> o1, Map.Entry<String, String> o2) {
            String left = o1.getValue();
            String right = o2.getValue();
            if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
                if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return -1;
                } else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return 1;
                }
            }
            return left.compareTo(right);
        }
    });
    metadataLock.writeLock().lock();
    // get exceptions if files are still open.
    try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (Map.Entry<String, String> entry : entries) {
            String tempFile = entry.getKey();
            String origFile = entry.getValue();
            // first, go and delete the existing ones
            try {
                directory.deleteFile(origFile);
            } catch (FileNotFoundException | NoSuchFileException e) {
                // ignore, the destination file does not exist yet
            } catch (Exception ex) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
            }
            // now, rename the files... and fail if it won't work
            directory.rename(tempFile, origFile);
            final String remove = tempFileMap.remove(tempFile);
            assert remove != null;
        }
        directory.syncMetaData();
    } finally {
        metadataLock.writeLock().unlock();
    }
}
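The comparator sorts every segments_N name after all other names, so the commit point is renamed last, just like a Lucene commit. A small self-contained demonstration of that ordering, assuming only that Lucene's IndexFileNames.SEGMENTS constant is the literal prefix "segments" (which it is):

import java.util.Arrays;

public class RenameOrderDemo {

    // Same ordering rule as the comparator above: any name starting with
    // "segments" sorts after every other name, so the commit file goes last.
    static int compare(String left, String right) {
        if (left.startsWith("segments") || right.startsWith("segments")) {
            if (!left.startsWith("segments")) {
                return -1;
            } else if (!right.startsWith("segments")) {
                return 1;
            }
        }
        return left.compareTo(right);
    }

    public static void main(String[] args) {
        String[] targets = { "segments_3", "_0.cfs", "_0.si", "_1.cfe" };
        Arrays.sort(targets, RenameOrderDemo::compare);
        // prints [_0.cfs, _0.si, _1.cfe, segments_3] - the commit point comes last
        System.out.println(Arrays.toString(targets));
    }
}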
Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.
The class Store, method cleanupAndVerify:
/**
 * This method deletes every file in this store that is not contained in the given source metadata or is a
 * legacy checksum file. After the delete it pulls the latest metadata snapshot from the store and compares it
 * to the given snapshot. If the snapshots are inconsistent, an IllegalStateException is thrown.
 *
 * @param reason the reason for this cleanup operation, logged for each deleted file
 * @param sourceMetaData the metadata used for cleanup. All files in this metadata should be kept around.
 * @throws IOException if an IOException occurs
 * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup.
 */
public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
    metadataLock.writeLock().lock();
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        final StoreDirectory dir = directory;
        for (String existingFile : dir.listAll()) {
            if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
                continue;
            }
            try {
                dir.deleteFile(reason, existingFile);
                // FNF should not happen since we hold a write lock?
            } catch (IOException ex) {
                if (existingFile.startsWith(IndexFileNames.SEGMENTS) || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                    // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                    throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
                }
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
                // ignore, we don't really care, will get deleted later on
            }
        }
        final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
        verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
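The delete-everything-not-in-the-snapshot loop can be sketched against a plain filesystem directory. The code below is an analogy built on java.nio.file, not the Store API; like the method above, it tolerates individual delete failures except for commit-point ("segments") files, which make the cleanup fail hard:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Set;

public class CleanupSketch {

    // Delete every file in dir that is not named in 'keep'; tolerate individual
    // failures except for commit-point files, mirroring cleanupAndVerify above.
    static void cleanup(Path dir, Set<String> keep) throws IOException {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path file : stream) {
                String name = file.getFileName().toString();
                if (keep.contains(name)) {
                    continue; // everything in the snapshot stays
                }
                try {
                    Files.delete(file);
                } catch (IOException ex) {
                    if (name.startsWith("segments")) {
                        // a stale commit point we cannot remove is fatal
                        throw new IllegalStateException("Can't delete " + name + " - cleanup failed", ex);
                    }
                    // otherwise ignore; the file can be removed on a later pass
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("cleanup-sketch");
        Files.createFile(dir.resolve("_0.cfs"));
        Files.createFile(dir.resolve("_1.cfs"));
        cleanup(dir, Set.of("_0.cfs")); // afterwards only _0.cfs remains
    }
}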