Example 36 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In class ZenDiscovery, method processNextPendingClusterState:

void processNextPendingClusterState(String reason) {
    clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new LocalClusterUpdateTask(Priority.URGENT) {

        ClusterState newClusterState = null;

        @Override
        public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
            newClusterState = publishClusterState.pendingStatesQueue().getNextClusterStateToProcess();
            // all pending states have been processed
            if (newClusterState == null) {
                return unchanged();
            }
            assert newClusterState.nodes().getMasterNode() != null : "received a cluster state without a master";
            assert !newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block";
            if (currentState.nodes().isLocalNodeElectedMaster()) {
                return handleAnotherMaster(currentState, newClusterState.nodes().getMasterNode(), newClusterState.version(), "via a new cluster state");
            }
            if (shouldIgnoreOrRejectNewClusterState(logger, currentState, newClusterState)) {
                return unchanged();
            }
            if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) {
                // it's a fresh update from the master, as we transition from not having a master to having one
                logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId());
                return newState(newClusterState);
            }
            // some optimizations to make sure we keep old objects where possible
            ClusterState.Builder builder = ClusterState.builder(newClusterState);
            // if the routing table did not change, use the original one
            if (newClusterState.routingTable().version() == currentState.routingTable().version()) {
                builder.routingTable(currentState.routingTable());
            }
            // same for metadata
            if (newClusterState.metaData().version() == currentState.metaData().version()) {
                builder.metaData(currentState.metaData());
            } else {
                // if it's not the same version, only copy over new indices or ones whose version changed
                MetaData.Builder metaDataBuilder = MetaData.builder(newClusterState.metaData()).removeAllIndices();
                for (IndexMetaData indexMetaData : newClusterState.metaData()) {
                    IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.getIndex());
                    if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.getIndexUUID()) && currentIndexMetaData.getVersion() == indexMetaData.getVersion()) {
                        // safe to reuse
                        metaDataBuilder.put(currentIndexMetaData, false);
                    } else {
                        metaDataBuilder.put(indexMetaData, false);
                    }
                }
                builder.metaData(metaDataBuilder);
            }
            return newState(builder.build());
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
            if (newClusterState != null) {
                try {
                    publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, e);
                } catch (Exception inner) {
                    inner.addSuppressed(e);
                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", source), inner);
                }
            }
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            try {
                if (newClusterState != null) {
                    // check to see that we monitor the correct master of the cluster
                    if (masterFD.masterNode() == null || !masterFD.masterNode().equals(newClusterState.nodes().getMasterNode())) {
                        masterFD.restart(newClusterState.nodes().getMasterNode(), "new cluster state received and we are monitoring the wrong master [" + masterFD.masterNode() + "]");
                    }
                    publishClusterState.pendingStatesQueue().markAsProcessed(newClusterState);
                }
            } catch (Exception e) {
                onFailure(source, e);
            }
        }
    });
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) LocalClusterUpdateTask(org.elasticsearch.cluster.LocalClusterUpdateTask) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ElasticsearchException(org.elasticsearch.ElasticsearchException) TransportException(org.elasticsearch.transport.TransportException) NotMasterException(org.elasticsearch.cluster.NotMasterException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData)
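
All five examples on this page share the same idiom: the Logger call is handed a log4j Supplier so that the ParameterizedMessage is only constructed if the log level is actually enabled. Below is a minimal, self-contained sketch of that pattern using only the plain log4j 2 API; it is illustrative code, not part of Elasticsearch.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {

    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    void onTaskFailure(String source, Exception e) {
        // The cast selects the error(Supplier<?>, Throwable) overload; the
        // ParameterizedMessage is only built when ERROR is enabled.
        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
    }
}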

Example 37 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In class ZenDiscovery, method handleAnotherMaster:

private ClusterStateTaskExecutor.ClusterTasksResult handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) {
    assert localClusterState.nodes().isLocalNodeElectedMaster() : "handleAnotherMaster called but current node is not a master";
    assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread";
    if (otherClusterStateVersion > localClusterState.version()) {
        return rejoin(localClusterState, "zen-disco-discovered another master with a new cluster_state [" + otherMaster + "][" + reason + "]");
    } else {
        logger.warn("discovered [{}] which is also master but with an older cluster_state, telling [{}] to rejoin the cluster ([{}])", otherMaster, otherMaster, reason);
        // spawn to a background thread to not do blocking operations on the cluster state thread
        threadPool.generic().execute(new AbstractRunnable() {

            @Override
            public void onFailure(Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
            }

            @Override
            protected void doRun() throws Exception {
                // make sure we're connected to this node (connect to node does nothing if we're already connected)
                // since the network connections are asymmetric, it may be that we received a state but have disconnected from the node
                // in the past (after a master failure, for example)
                transportService.connectToNode(otherMaster);
                transportService.sendRequest(otherMaster, DISCOVERY_REJOIN_ACTION_NAME, new RejoinClusterRequest(localNode().getId()), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {

                    @Override
                    public void handleException(TransportException exp) {
                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
                    }
                });
            }
        });
        return LocalClusterUpdateTask.unchanged();
    }
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) EmptyTransportResponseHandler(org.elasticsearch.transport.EmptyTransportResponseHandler) TransportException(org.elasticsearch.transport.TransportException) ElasticsearchException(org.elasticsearch.ElasticsearchException) NotMasterException(org.elasticsearch.cluster.NotMasterException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
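
Example 37 combines the Supplier idiom with Elasticsearch's AbstractRunnable, which funnels any exception thrown by doRun() into onFailure(). A rough, hypothetical equivalent using only the JDK executor framework and the log4j API could look like the sketch below; sendRejoinRequest is a placeholder, not the real transport call.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class RejoinSketch {

    private static final Logger logger = LogManager.getLogger(RejoinSketch.class);
    private final ExecutorService background = Executors.newSingleThreadExecutor();

    // Hypothetical stand-in for transportService.sendRequest(...); not a real API.
    private void sendRejoinRequest(String otherMaster) throws Exception {
        // ... network call that may block or throw ...
    }

    void tellOtherMasterToRejoin(String otherMaster) {
        // Do the (potentially blocking) network work off the caller's thread,
        // and log failures lazily, as in ZenDiscovery.handleAnotherMaster above.
        background.execute(() -> {
            try {
                sendRejoinRequest(otherMaster);
            } catch (Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
            }
        });
    }
}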

Example 38 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In class TransportNodesListGatewayStartedShards, method nodeOperation:

@Override
protected NodeGatewayStartedShards nodeOperation(NodeRequest request) {
    try {
        final ShardId shardId = request.getShardId();
        logger.trace("{} loading local shard state info", shardId);
        ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, nodeEnv.availableShardPaths(request.shardId));
        if (shardStateMetaData != null) {
            IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex());
            if (metaData == null) {
                // we may send this request while processing the cluster state that recovered the index
                // sometimes the request comes in before the local node processed that cluster state
                // in such cases we can load it from disk
                metaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, nodeEnv.indexPaths(shardId.getIndex()));
            }
            if (metaData == null) {
                ElasticsearchException e = new ElasticsearchException("failed to find local IndexMetaData");
                e.setShard(request.shardId);
                throw e;
            }
            if (indicesService.getShardOrNull(shardId) == null) {
                // we don't have an open shard on the store; validate that the files on disk are openable
                ShardPath shardPath = null;
                try {
                    IndexSettings indexSettings = new IndexSettings(metaData, settings);
                    shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
                    if (shardPath == null) {
                        throw new IllegalStateException(shardId + " no shard path found");
                    }
                    Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
                } catch (Exception exception) {
                    final ShardPath finalShardPath = shardPath;
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} can't open index for shard [{}] in path [{}]", shardId, shardStateMetaData, (finalShardPath != null) ? finalShardPath.resolveIndex() : ""), exception);
                    String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
                    return new NodeGatewayStartedShards(clusterService.localNode(), allocationId, shardStateMetaData.primary, exception);
                }
            }
            logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
            String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
            return new NodeGatewayStartedShards(clusterService.localNode(), allocationId, shardStateMetaData.primary);
        }
        logger.trace("{} no local shard info found", shardId);
        return new NodeGatewayStartedShards(clusterService.localNode(), null, false);
    } catch (Exception e) {
        throw new ElasticsearchException("failed to load started shards", e);
    }
}
Also used : IndexSettings(org.elasticsearch.index.IndexSettings) ElasticsearchException(org.elasticsearch.ElasticsearchException) FailedNodeException(org.elasticsearch.action.FailedNodeException) IOException(java.io.IOException) ShardStateMetaData(org.elasticsearch.index.shard.ShardStateMetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) ShardId(org.elasticsearch.index.shard.ShardId) ShardPath(org.elasticsearch.index.shard.ShardPath) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)
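
Note the final ShardPath finalShardPath = shardPath; copy inside the catch block above: a lambda may only capture local variables that are effectively final, and shardPath is reassigned earlier in the try block, so it cannot be referenced from the message Supplier directly. A stripped-down illustration of the same workaround follows; the helper names are hypothetical.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class EffectivelyFinalSketch {

    private static final Logger logger = LogManager.getLogger(EffectivelyFinalSketch.class);

    void tryOpen(String shardId) {
        String path = null;                  // reassigned below, so not effectively final
        try {
            path = resolvePath(shardId);     // hypothetical helper
            open(path);                      // hypothetical helper that may throw
        } catch (Exception e) {
            final String finalPath = path;   // copy to a final variable for the lambda
            logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} can't open index in path [{}]", shardId, finalPath != null ? finalPath : ""), e);
        }
    }

    private String resolvePath(String shardId) { return "/data/" + shardId; }

    private void open(String path) throws Exception { /* ... */ }
}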

Example 39 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In class IndexService, method onShardClose:

private void onShardClose(ShardLock lock, boolean ownsShard) {
    if (deleted.get()) {
        // we remove that shard's content if this index has been deleted
        try {
            if (ownsShard) {
                try {
                    eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
                } finally {
                    shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings);
                    eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
                }
            }
        } catch (IOException e) {
            shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard content - scheduled a retry", lock.getShardId().id()), e);
        }
    }
}
Also used : LongSupplier(java.util.function.LongSupplier) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IOException(java.io.IOException)

Example 40 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In class IndexService, method closeShard:

private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store store, IndexEventListener listener) {
    final int shardId = sId.id();
    final Settings indexSettings = this.getIndexSettings().getSettings();
    try {
        try {
            listener.beforeIndexShardClosed(sId, indexShard, indexSettings);
        } finally {
            // and close the shard so no operations are allowed on it
            if (indexShard != null) {
                try {
                    // only flush if we are closed (closed index or shutdown) and if we are not deleted
                    final boolean flushEngine = deleted.get() == false && closed.get();
                    indexShard.close(reason, flushEngine);
                } catch (Exception e) {
                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
                    // ignore
                }
            }
            // call this before we close the store, so we can release resources for it
            listener.afterIndexShardClosed(sId, indexShard, indexSettings);
        }
    } finally {
        try {
            if (store != null) {
                store.close();
            } else {
                logger.trace("[{}] store not initialized prior to closing shard, nothing to close", shardId);
            }
        } catch (Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close store on shard removal (reason: [{}])", shardId, reason), e);
        }
    }
}
Also used : LongSupplier(java.util.function.LongSupplier) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) Settings(org.elasticsearch.common.settings.Settings) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) IndexShardClosedException(org.elasticsearch.index.shard.IndexShardClosedException) IOException(java.io.IOException)
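
A side note on the imports in Examples 39 and 40: the "Also used" lines list both java.util.function.LongSupplier and org.apache.logging.log4j.util.Supplier. They are unrelated types that merely share a name; only the log4j Supplier takes part in the logging idiom, and the explicit (Supplier<?>) cast in the calls above pins down which Logger overload the lambda targets. A small contrast, assuming nothing beyond the two libraries' public types:

import java.util.function.LongSupplier;
import org.apache.logging.log4j.util.Supplier;

public class SupplierKindsSketch {

    // JDK supplier of a primitive long, e.g. a clock source.
    static final LongSupplier NANO_CLOCK = System::nanoTime;

    // log4j supplier of a lazily built log message.
    static final Supplier<String> LOG_MESSAGE = () -> "built only if the log level is enabled";
}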

Aggregations

Supplier (org.apache.logging.log4j.util.Supplier): 94
ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 91
IOException (java.io.IOException): 55
ElasticsearchException (org.elasticsearch.ElasticsearchException): 27
ArrayList (java.util.ArrayList): 25
ClusterState (org.elasticsearch.cluster.ClusterState): 21
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 21
TimeValue (org.elasticsearch.common.unit.TimeValue): 14
HashMap (java.util.HashMap): 12
Map (java.util.Map): 11
Settings (org.elasticsearch.common.settings.Settings): 11
TransportException (org.elasticsearch.transport.TransportException): 11
List (java.util.List): 10
ExecutionException (java.util.concurrent.ExecutionException): 10
Index (org.elasticsearch.index.Index): 10
CountDownLatch (java.util.concurrent.CountDownLatch): 9
NotMasterException (org.elasticsearch.cluster.NotMasterException): 8
ClusterStateUpdateResponse (org.elasticsearch.cluster.ack.ClusterStateUpdateResponse): 8
ClusterBlockException (org.elasticsearch.cluster.block.ClusterBlockException): 8
NoSuchFileException (java.nio.file.NoSuchFileException): 7