Example 66 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

The class SyncedFlushService, method sendSyncRequests.

void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds, final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) {
    final CountDown countDown = new CountDown(shards.size());
    final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
    for (final ShardRouting shard : shards) {
        final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
        if (node == null) {
            logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
            results.put(shard, new ShardSyncedFlushResponse("unknown node"));
            contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            continue;
        }
        final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
        if (expectedCommitId == null) {
            logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
            results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
            contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            continue;
        }
        logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
        transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), new TransportResponseHandler<ShardSyncedFlushResponse>() {

            @Override
            public ShardSyncedFlushResponse newInstance() {
                return new ShardSyncedFlushResponse();
            }

            @Override
            public void handleResponse(ShardSyncedFlushResponse response) {
                ShardSyncedFlushResponse existing = results.put(shard, response);
                assert existing == null : "got two answers for node [" + node + "]";
                // count after the assert so we won't decrement twice in handleException
                contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            }

            @Override
            public void handleException(TransportException exp) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
                results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
                contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
    }
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), CountDown (org.elasticsearch.common.util.concurrent.CountDown), TransportException (org.elasticsearch.transport.TransportException), Supplier (org.apache.logging.log4j.util.Supplier), ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting), Engine (org.elasticsearch.index.engine.Engine)
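
Every example on this page uses the same idiom: the log message is wrapped in an org.apache.logging.log4j.util.Supplier, so the ParameterizedMessage is only constructed when the log level is actually enabled. A minimal, self-contained sketch of the pattern (the class name and the failure scenario are illustrative, not taken from the Elasticsearch source):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyTraceLogging {

    private static final Logger logger = LogManager.getLogger(LazyTraceLogging.class);

    void onShardFailure(String shardId, Exception cause) {
        // The cast selects the trace(Supplier<?>, Throwable) overload, so the
        // ParameterizedMessage is never allocated unless TRACE is enabled.
        logger.trace((Supplier<?>) () -> new ParameterizedMessage(
            "{} operation failed, skipping", shardId), cause);
    }
}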

Example 67 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

The class SyncedFlushService, method sendPreSyncRequests.

/**
     * send presync requests to all started copies of the given shard
     */
void sendPreSyncRequests(final List<ShardRouting> shards, final ClusterState state, final ShardId shardId, final ActionListener<Map<String, Engine.CommitId>> listener) {
    final CountDown countDown = new CountDown(shards.size());
    final ConcurrentMap<String, Engine.CommitId> commitIds = ConcurrentCollections.newConcurrentMap();
    for (final ShardRouting shard : shards) {
        logger.trace("{} sending pre-synced flush request to {}", shardId, shard);
        final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
        if (node == null) {
            logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard);
            if (countDown.countDown()) {
                listener.onResponse(commitIds);
            }
            continue;
        }
        transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new TransportResponseHandler<PreSyncedFlushResponse>() {

            @Override
            public PreSyncedFlushResponse newInstance() {
                return new PreSyncedFlushResponse();
            }

            @Override
            public void handleResponse(PreSyncedFlushResponse response) {
                Engine.CommitId existing = commitIds.putIfAbsent(node.getId(), response.commitId());
                assert existing == null : "got two answers for node [" + node + "]";
                // count after the assert so we won't decrement twice in handleException
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
            }

            @Override
            public void handleException(TransportException exp) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
                if (countDown.countDown()) {
                    listener.onResponse(commitIds);
                }
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
    }
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), CountDown (org.elasticsearch.common.util.concurrent.CountDown), TransportException (org.elasticsearch.transport.TransportException), Supplier (org.apache.logging.log4j.util.Supplier), ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting)
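
The control flow around these requests is a fan-out/fan-in: every reply path, whether success, failure, or unknown node, counts down exactly once, and whichever path performs the final countdown delivers the aggregated result to the listener. A reduced sketch of that pattern, using a plain AtomicInteger in place of Elasticsearch's CountDown helper (task and listener names are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class FanOutCountDown {

    /** Fans out n asynchronous tasks and invokes the listener exactly once, after the last reply. */
    static void fanOut(int n, Consumer<Map<Integer, String>> listener) {
        AtomicInteger remaining = new AtomicInteger(n);            // stands in for CountDown
        Map<Integer, String> results = new ConcurrentHashMap<>();  // stands in for the results map
        for (int i = 0; i < n; i++) {
            final int task = i;
            // In the real service this is a transport callback; a thread stands in for it here.
            new Thread(() -> {
                results.put(task, "ok");
                if (remaining.decrementAndGet() == 0) {  // the last responder sends the aggregated answer
                    listener.accept(results);
                }
            }).start();
        }
    }

    public static void main(String[] args) {
        fanOut(3, results -> System.out.println("all done: " + results));
    }
}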

Example 68 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

The class PeerRecoveryTargetService, method waitForClusterState.

private void waitForClusterState(long clusterStateVersion) {
    final ClusterState clusterState = clusterService.state();
    ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, TimeValue.timeValueMinutes(5), logger, threadPool.getThreadContext());
    if (clusterState.getVersion() >= clusterStateVersion) {
        logger.trace("node has cluster state with version higher than {} (current: {})", clusterStateVersion, clusterState.getVersion());
        return;
    } else {
        logger.trace("waiting for cluster state version {} (current: {})", clusterStateVersion, clusterState.getVersion());
        final PlainActionFuture<Long> future = new PlainActionFuture<>();
        observer.waitForNextChange(new ClusterStateObserver.Listener() {

            @Override
            public void onNewClusterState(ClusterState state) {
                future.onResponse(state.getVersion());
            }

            @Override
            public void onClusterServiceClose() {
                future.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                future.onFailure(new IllegalStateException("cluster state never updated to version " + clusterStateVersion));
            }
        }, newState -> newState.getVersion() >= clusterStateVersion);
        try {
            long currentVersion = future.get();
            logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion);
        } catch (Exception e) {
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed waiting for cluster state with version {} (current: {})", clusterStateVersion, clusterService.state().getVersion()), e);
            throw ExceptionsHelper.convertToRuntime(e);
        }
    }
}
Also used: ClusterState (org.elasticsearch.cluster.ClusterState), ClusterStateObserver (org.elasticsearch.cluster.ClusterStateObserver), ElasticsearchException (org.elasticsearch.ElasticsearchException), AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), RecoveryEngineException (org.elasticsearch.index.engine.RecoveryEngineException), NodeClosedException (org.elasticsearch.node.NodeClosedException), ShardNotFoundException (org.elasticsearch.index.shard.ShardNotFoundException), ElasticsearchTimeoutException (org.elasticsearch.ElasticsearchTimeoutException), ConnectTransportException (org.elasticsearch.transport.ConnectTransportException), IndexNotFoundException (org.elasticsearch.index.IndexNotFoundException), IOException (java.io.IOException), IllegalIndexShardStateException (org.elasticsearch.index.shard.IllegalIndexShardStateException), MapperException (org.elasticsearch.index.mapper.MapperException), PlainActionFuture (org.elasticsearch.action.support.PlainActionFuture), AtomicLong (java.util.concurrent.atomic.AtomicLong), Supplier (org.apache.logging.log4j.util.Supplier), ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage), TimeValue (org.elasticsearch.common.unit.TimeValue)
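
waitForClusterState bridges a callback-style observer into a blocking call: it parks on a future until the predicate newState.getVersion() >= clusterStateVersion is satisfied. The same bridge can be sketched with a plain CompletableFuture (the observer hook here is an assumption for illustration, not the ClusterStateObserver API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.LongConsumer;

public class WaitForVersion {

    /** Blocks until the observed version reaches wanted, or fails after five minutes. */
    static long waitForVersion(long wanted, Consumer<LongConsumer> onNewVersion) throws Exception {
        CompletableFuture<Long> future = new CompletableFuture<>();
        // Register a callback; complete the future on the first version that is new enough.
        onNewVersion.accept(version -> {
            if (version >= wanted) {
                future.complete(version);
            }
        });
        try {
            return future.get(5, TimeUnit.MINUTES); // mirrors the observer's five-minute timeout
        } catch (TimeoutException e) {
            throw new IllegalStateException("cluster state never updated to version " + wanted, e);
        }
    }
}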

Example 69 with ParameterizedMessage

use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

the class Store method renameTempFilesSafe.

/**
     * Renames all the given files from the key of the map to the
     * value of the map. All successfully renamed files are removed from the map in-place.
     */
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {
    // this works just like a lucene commit - we rename all temp files and once we successfully
    // renamed all the segments we rename the commit to ensure we don't leave half baked commits behind.
    final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[tempFileMap.size()]);
    ArrayUtil.timSort(entries, new Comparator<Map.Entry<String, String>>() {
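        // order the entries so that any file starting with IndexFileNames.SEGMENTS
        // (the Lucene commit point) sorts last and is therefore renamed after all other files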

        @Override
        public int compare(Map.Entry<String, String> o1, Map.Entry<String, String> o2) {
            String left = o1.getValue();
            String right = o2.getValue();
            if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
                if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return -1;
                } else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return 1;
                }
            }
            return left.compareTo(right);
        }
    });
    metadataLock.writeLock().lock();
    // we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't
    // get exceptions if files are still open.
    try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (Map.Entry<String, String> entry : entries) {
            String tempFile = entry.getKey();
            String origFile = entry.getValue();
            // first, go and delete the existing ones
            try {
                directory.deleteFile(origFile);
            } catch (FileNotFoundException | NoSuchFileException e) {
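                // ignore: the destination file does not exist yet, so there is nothing to delete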
            } catch (Exception ex) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
            }
            // now, rename the files... and fail if it won't work
            directory.rename(tempFile, origFile);
            final String remove = tempFileMap.remove(tempFile);
            assert remove != null;
        }
        directory.syncMetaData();
    } finally {
        metadataLock.writeLock().unlock();
    }
}
Also used: FileNotFoundException (java.io.FileNotFoundException), NoSuchFileException (java.nio.file.NoSuchFileException), IndexNotFoundException (org.apache.lucene.index.IndexNotFoundException), ElasticsearchException (org.elasticsearch.ElasticsearchException), IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException), AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), CorruptIndexException (org.apache.lucene.index.CorruptIndexException), ShardLockObtainFailedException (org.elasticsearch.env.ShardLockObtainFailedException), EOFException (java.io.EOFException), AccessDeniedException (java.nio.file.AccessDeniedException), IOException (java.io.IOException), IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException), ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock), Lock (org.apache.lucene.store.Lock), ShardLock (org.elasticsearch.env.ShardLock), Supplier (org.apache.logging.log4j.util.Supplier), ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage), Map (java.util.Map), HashMap (java.util.HashMap), Collections.emptyMap (java.util.Collections.emptyMap), Collections.unmodifiableMap (java.util.Collections.unmodifiableMap)
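
The comparator in this example exists so that the segments_N file, Lucene's commit point, is always renamed last: if the process dies mid-rename, no commit ever references files that are not yet in place. A stripped-down sketch of the same ordering rule (file names are illustrative, and this is not the Store API):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class RenameOrderSketch {

    static final String SEGMENTS = "segments"; // Lucene commit files start with this prefix

    public static void main(String[] args) {
        List<String> targets = new ArrayList<>(List.of("segments_3", "_0.cfs", "_0.si"));
        // false sorts before true, so non-segments files come first and the commit point is renamed last
        targets.sort(Comparator.comparing((String name) -> name.startsWith(SEGMENTS))
            .thenComparing(Comparator.naturalOrder()));
        System.out.println(targets); // [_0.cfs, _0.si, segments_3]
    }
}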

Example 70 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

The class Store, method cleanupAndVerify.

/**
     * This method deletes every file in this store that is not contained in the given source meta data or is a
     * legacy checksum file. After the delete it pulls the latest metadata snapshot from the store and compares it
     * to the given snapshot. If the snapshots are inconsistent an illegal state exception is thrown
     *
     * @param reason         the reason for this cleanup operation logged for each deleted file
     * @param sourceMetaData the metadata used for cleanup. all files in this metadata should be kept around.
     * @throws IOException           if an IOException occurs
     * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup.
     */
public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
    metadataLock.writeLock().lock();
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        final StoreDirectory dir = directory;
        for (String existingFile : dir.listAll()) {
            if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
                continue;
            }
            try {
                dir.deleteFile(reason, existingFile);
            // FNF should not happen since we hold a write lock?
            } catch (IOException ex) {
                if (existingFile.startsWith(IndexFileNames.SEGMENTS) || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                    // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                    throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
                }
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
            // ignore, we don't really care, will get deleted later on
            }
        }
        final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
        verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
Also used: Supplier (org.apache.logging.log4j.util.Supplier), ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage), IOException (java.io.IOException), ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock), Lock (org.apache.lucene.store.Lock), ShardLock (org.elasticsearch.env.ShardLock)
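
The cleanup loop boils down to a small decision table: keep any file the snapshot knows about, fail hard if a commit file cannot be removed, and merely log other delete failures. A hedged sketch of that logic using java.nio.file instead of a Lucene Directory (the method and parameter names are illustrative):

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Set;

public class CleanupSketch {

    /** Deletes every file in dir that is not in keep; a failure on a "segments" file is fatal. */
    static void cleanup(Path dir, Set<String> keep) throws IOException {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path file : stream) {
                String name = file.getFileName().toString();
                if (keep.contains(name)) {
                    continue; // referenced by the snapshot, must stay
                }
                try {
                    Files.delete(file);
                } catch (IOException ex) {
                    if (name.startsWith("segments")) {
                        // a stale commit point left behind could resurrect deleted files
                        throw new IllegalStateException("Can't delete " + name + " - cleanup failed", ex);
                    }
                    // otherwise ignore: the file will be removed by a later cleanup
                }
            }
        }
    }
}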

Aggregations

ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 131
Supplier (org.apache.logging.log4j.util.Supplier): 90
IOException (java.io.IOException): 75
ElasticsearchException (org.elasticsearch.ElasticsearchException): 38
ArrayList (java.util.ArrayList): 28
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 26
ClusterState (org.elasticsearch.cluster.ClusterState): 25
HashMap (java.util.HashMap): 16
TimeValue (org.elasticsearch.common.unit.TimeValue): 14
TransportException (org.elasticsearch.transport.TransportException): 14
List (java.util.List): 13
Supplier (java.util.function.Supplier): 13
Map (java.util.Map): 12
CountDownLatch (java.util.concurrent.CountDownLatch): 12
ExecutionException (java.util.concurrent.ExecutionException): 12
Settings (org.elasticsearch.common.settings.Settings): 12
EsRejectedExecutionException (org.elasticsearch.common.util.concurrent.EsRejectedExecutionException): 12
AbstractRunnable (org.elasticsearch.common.util.concurrent.AbstractRunnable): 11
Index (org.elasticsearch.index.Index): 11
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 10