
Example 81 with IndexNotFoundException

use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.

In class TransportResizeAction, the method prepareCreateIndexRequest:

// static for unit-testing this method
static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ResizeRequest resizeRequest,
                                                                      final ClusterState state,
                                                                      final IntFunction<DocsStats> perShardDocStats,
                                                                      String sourceIndexName,
                                                                      String targetIndexName) {
    final CreateIndexRequest targetIndex = resizeRequest.getTargetIndexRequest();
    final IndexMetadata metadata = state.metadata().index(sourceIndexName);
    if (metadata == null) {
        throw new IndexNotFoundException(sourceIndexName);
    }
    final Settings targetIndexSettings = Settings.builder()
        .put(targetIndex.settings())
        .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX)
        .build();
    final int numShards;
    if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
        numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
    } else {
        assert resizeRequest.getResizeType() == ResizeType.SHRINK : "split must specify the number of shards explicitly";
        numShards = 1;
    }
    for (int i = 0; i < numShards; i++) {
        if (resizeRequest.getResizeType() == ResizeType.SHRINK) {
            Set<ShardId> shardIds = IndexMetadata.selectShrinkShards(i, metadata, numShards);
            long count = 0;
            for (ShardId id : shardIds) {
                DocsStats docsStats = perShardDocStats.apply(id.id());
                if (docsStats != null) {
                    count += docsStats.getCount();
                }
                if (count > IndexWriter.MAX_DOCS) {
                    throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS + "] docs - too many documents in shards " + shardIds);
                }
            }
        } else {
            // executed only for its side effects: selectSplitShard throws the appropriate
            // exception if the target shard count is not a valid split of the source
            Objects.requireNonNull(IndexMetadata.selectSplitShard(i, metadata, numShards));
        }
    }
    if (IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) {
        throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index");
    }
    if (IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) {
        throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize");
    }
    if (IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(metadata.getSettings())
        && IndexSettings.INDEX_SOFT_DELETES_SETTING.get(metadata.getSettings())
        && IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(targetIndexSettings)
        && IndexSettings.INDEX_SOFT_DELETES_SETTING.get(targetIndexSettings) == false) {
        throw new IllegalArgumentException("Can't disable [index.soft_deletes.enabled] setting on resize");
    }
    String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index";
    targetIndex.cause(cause);
    Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);
    settingsBuilder.put("index.number_of_shards", numShards);
    targetIndex.settings(settingsBuilder);
    return new CreateIndexClusterStateUpdateRequest(cause, targetIndex.index(), targetIndexName)
        .ackTimeout(targetIndex.timeout())
        .masterNodeTimeout(targetIndex.masterNodeTimeout())
        .settings(targetIndex.settings())
        .aliases(targetIndex.aliases())
        .waitForActiveShards(targetIndex.waitForActiveShards())
        .recoverFrom(metadata.getIndex())
        .resizeType(resizeRequest.getResizeType())
        .copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings());
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) CreateIndexClusterStateUpdateRequest(org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) DocsStats(org.elasticsearch.index.shard.DocsStats) CreateIndexRequest(org.elasticsearch.action.admin.indices.create.CreateIndexRequest) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) Settings(org.elasticsearch.common.settings.Settings) IndexSettings(org.elasticsearch.index.IndexSettings)
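
The null check on the source index above is the pattern that raises IndexNotFoundException in most of these examples: resolve the IndexMetadata from the cluster state and fail fast when it is absent. A minimal sketch of that guard, pulled into a hypothetical helper (IndexLookup is illustrative and not part of the crate codebase):

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.index.IndexNotFoundException;

final class IndexLookup {

    // Resolve an index from the cluster state, failing fast with an
    // IndexNotFoundException when no metadata exists for that name.
    static IndexMetadata requireIndex(ClusterState state, String indexName) {
        IndexMetadata metadata = state.metadata().index(indexName);
        if (metadata == null) {
            throw new IndexNotFoundException(indexName);
        }
        return metadata;
    }
}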

Example 82 with IndexNotFoundException

use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.

In class AllocateStalePrimaryAllocationCommand, the method execute:

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
    final DiscoveryNode discoNode;
    try {
        discoNode = allocation.nodes().resolveNode(node);
    } catch (IllegalArgumentException e) {
        return explainOrThrowRejectedCommand(explain, allocation, e);
    }
    final RoutingNodes routingNodes = allocation.routingNodes();
    RoutingNode routingNode = routingNodes.node(discoNode.getId());
    if (routingNode == null) {
        return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
    }
    try {
        allocation.routingTable().shardRoutingTable(index, shardId).primaryShard();
    } catch (IndexNotFoundException | ShardNotFoundException e) {
        return explainOrThrowRejectedCommand(explain, allocation, e);
    }
    ShardRouting shardRouting = null;
    for (ShardRouting shard : allocation.routingNodes().unassigned()) {
        if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) {
            shardRouting = shard;
            break;
        }
    }
    if (shardRouting == null) {
        return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned");
    }
    if (acceptDataLoss == false) {
        String dataLossWarning = "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please " + "confirm by setting the accept_data_loss parameter to true";
        return explainOrThrowRejectedCommand(explain, allocation, dataLossWarning);
    }
    if (shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
        return explainOrThrowRejectedCommand(explain, allocation, "trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active");
    }
    initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null, RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE);
    return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
Also used : DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) RoutingNodes(org.elasticsearch.cluster.routing.RoutingNodes) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) RerouteExplanation(org.elasticsearch.cluster.routing.allocation.RerouteExplanation) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting)
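
The allocation commands validate the target shard by probing the routing table and catching IndexNotFoundException together with ShardNotFoundException, turning a missing index or shard into a rejection message instead of an escaping exception. A minimal sketch of that translation step, assuming a hypothetical ShardLookup abstraction (not part of the project):

import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardNotFoundException;

final class ShardValidation {

    // Hypothetical lookup: implementations throw IndexNotFoundException or
    // ShardNotFoundException when the index or shard does not exist.
    interface ShardLookup {
        void primaryShard(String index, int shardId);
    }

    // Returns null when the shard exists, otherwise a human-readable rejection.
    static String rejectionOrNull(ShardLookup lookup, String index, int shardId) {
        try {
            lookup.primaryShard(index, shardId);
            return null;
        } catch (IndexNotFoundException | ShardNotFoundException e) {
            return "cannot allocate primary [" + index + "][" + shardId + "]: " + e.getMessage();
        }
    }
}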

Example 83 with IndexNotFoundException

use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.

In class CancelAllocationCommand, the method execute:

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
    DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
    ShardRouting shardRouting = null;
    RoutingNodes routingNodes = allocation.routingNodes();
    RoutingNode routingNode = routingNodes.node(discoNode.getId());
    IndexMetadata indexMetadata = null;
    if (routingNode != null) {
        indexMetadata = allocation.metadata().index(index());
        if (indexMetadata == null) {
            throw new IndexNotFoundException(index());
        }
        ShardId shardId = new ShardId(indexMetadata.getIndex(), shardId());
        shardRouting = routingNode.getByShardId(shardId);
    }
    if (shardRouting == null) {
        if (explain) {
            return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command", "can't cancel " + shardId + ", failed to find it on node " + discoNode));
        }
        throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode);
    }
    if (shardRouting.primary() && allowPrimary == false) {
        if ((shardRouting.initializing() && shardRouting.relocatingNodeId() != null) == false) {
            // only allow cancelling initializing shard of primary relocation without allowPrimary flag
            if (explain) {
                return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command", "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and " + shardRouting.state().name().toLowerCase(Locale.ROOT)));
            }
            throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + discoNode + ", shard is primary and " + shardRouting.state().name().toLowerCase(Locale.ROOT));
        }
    }
    routingNodes.failShard(LogManager.getLogger(CancelAllocationCommand.class), shardRouting, new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null), indexMetadata, allocation.changes());
    // TODO: We don't have to remove a cancelled shard from in-sync set once we have a strict resync implementation.
    allocation.removeAllocationId(shardRouting);
    return new RerouteExplanation(this, allocation.decision(Decision.YES, "cancel_allocation_command", "shard " + shardId + " on node " + discoNode + " can be cancelled"));
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) RoutingNodes(org.elasticsearch.cluster.routing.RoutingNodes) UnassignedInfo(org.elasticsearch.cluster.routing.UnassignedInfo) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) RerouteExplanation(org.elasticsearch.cluster.routing.allocation.RerouteExplanation) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata)
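
CancelAllocationCommand reports failures in two ways: with explain enabled it returns a NO decision inside the RerouteExplanation, otherwise it throws an IllegalArgumentException. A sketch of that explain-or-throw convention as a hypothetical generic helper (the real commands inline this logic rather than using such a helper):

import java.util.function.Function;

final class ExplainOrThrow {

    // With explain=true the rejection is wrapped into a response object
    // (e.g. a RerouteExplanation carrying a NO decision); otherwise the
    // same reason surfaces to the caller as an IllegalArgumentException.
    static <T> T explainOrThrow(boolean explain, String reason, Function<String, T> toExplanation) {
        if (explain) {
            return toExplanation.apply(reason);
        }
        throw new IllegalArgumentException(reason);
    }
}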

Example 84 with IndexNotFoundException

use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.

In class AllocateEmptyPrimaryAllocationCommand, the method execute:

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
    final DiscoveryNode discoNode;
    try {
        discoNode = allocation.nodes().resolveNode(node);
    } catch (IllegalArgumentException e) {
        return explainOrThrowRejectedCommand(explain, allocation, e);
    }
    final RoutingNodes routingNodes = allocation.routingNodes();
    RoutingNode routingNode = routingNodes.node(discoNode.getId());
    if (routingNode == null) {
        return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
    }
    try {
        allocation.routingTable().shardRoutingTable(index, shardId).primaryShard();
    } catch (IndexNotFoundException | ShardNotFoundException e) {
        return explainOrThrowRejectedCommand(explain, allocation, e);
    }
    ShardRouting shardRouting = null;
    for (ShardRouting shard : allocation.routingNodes().unassigned()) {
        if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) {
            shardRouting = shard;
            break;
        }
    }
    if (shardRouting == null) {
        return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned");
    }
    if (shardRouting.recoverySource().getType() != RecoverySource.Type.EMPTY_STORE && acceptDataLoss == false) {
        String dataLossWarning = "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true";
        return explainOrThrowRejectedCommand(explain, allocation, dataLossWarning);
    }
    UnassignedInfo unassignedInfoToUpdate = null;
    if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY) {
        String unassignedInfoMessage = "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage();
        unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY,
                                                    unassignedInfoMessage,
                                                    shardRouting.unassignedInfo().getFailure(),
                                                    0,
                                                    System.nanoTime(),
                                                    System.currentTimeMillis(),
                                                    false,
                                                    shardRouting.unassignedInfo().getLastAllocationStatus(),
                                                    Collections.emptySet());
    }
    initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate, EmptyStoreRecoverySource.INSTANCE);
    return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
Also used : DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) RoutingNodes(org.elasticsearch.cluster.routing.RoutingNodes) UnassignedInfo(org.elasticsearch.cluster.routing.UnassignedInfo) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) RerouteExplanation(org.elasticsearch.cluster.routing.allocation.RerouteExplanation) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting)

Example 85 with IndexNotFoundException

use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.

In class PeerRecoveryTargetService, the method doRecovery:

private void doRecovery(final long recoveryId) {
    final StartRecoveryRequest request;
    final RecoveryState.Timer timer;
    CancellableThreads cancellableThreads;
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef == null) {
            LOGGER.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId);
            return;
        }
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        timer = recoveryTarget.state().getTimer();
        cancellableThreads = recoveryTarget.cancellableThreads();
        try {
            assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
            LOGGER.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
            recoveryTarget.indexShard().prepareForIndexRecovery();
            final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint();
            assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]";
            request = getStartRecoveryRequest(LOGGER, clusterService.localNode(), recoveryTarget, startingSeqNo);
        } catch (final Exception e) {
            // this will be logged as warning later on...
            LOGGER.trace("unexpected error while preparing shard for peer recovery, failing recovery", e);
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true);
            return;
        }
    }
    Consumer<Exception> handleException = e -> {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace(() -> new ParameterizedMessage("[{}][{}] Got exception on recovery", request.shardId().getIndex().getName(), request.shardId().id()), e);
        }
        Throwable cause = SQLExceptions.unwrap(e);
        if (cause instanceof CancellableThreads.ExecutionCancelledException) {
            // this can also come from the source wrapped in a RemoteTransportException
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
            return;
        }
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        // do it twice, in case we have double transport exception
        cause = SQLExceptions.unwrap(cause);
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof ShardNotFoundException) {
            // if the target is not ready yet, retry
            retryRecovery(recoveryId, "remote shard not ready", recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof DelayRecoveryException) {
            retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof ConnectTransportException) {
            LOGGER.debug("delaying recovery of {} for [{}] due to networking error [{}]", request.shardId(), recoverySettings.retryDelayNetwork(), cause.getMessage());
            retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof AlreadyClosedException) {
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false);
            return;
        }
        onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true);
    };
    try {
        LOGGER.trace("{} starting recovery from {}", request.shardId(), request.sourceNode());
        cancellableThreads.executeIO(() -> transportService.sendRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request, new TransportResponseHandler<RecoveryResponse>() {

            @Override
            public void handleResponse(RecoveryResponse recoveryResponse) {
                final TimeValue recoveryTime = new TimeValue(timer.time());
                // do this through ongoing recoveries to remove it from the collection
                onGoingRecoveries.markRecoveryAsDone(recoveryId);
                if (LOGGER.isTraceEnabled()) {
                    StringBuilder sb = new StringBuilder();
                    sb.append('[').append(request.shardId().getIndex().getName()).append(']').append('[').append(request.shardId().id()).append("] ");
                    sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n");
                    sb.append("   phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]").append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']').append("\n");
                    sb.append("         : reusing_files   [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
                    sb.append("   phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
                    sb.append("         : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations").append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]").append("\n");
                    LOGGER.trace("{}", sb);
                } else {
                    LOGGER.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime);
                }
            }

            @Override
            public void handleException(TransportException e) {
                handleException.accept(e);
            }

            @Override
            public String executor() {
                // we do some heavy work like refreshes in the response so fork off to the generic threadpool
                return ThreadPool.Names.GENERIC;
            }

            @Override
            public RecoveryResponse read(StreamInput in) throws IOException {
                return new RecoveryResponse(in);
            }
        }));
    } catch (CancellableThreads.ExecutionCancelledException e) {
        LOGGER.trace("recovery cancelled", e);
    } catch (Exception e) {
        handleException.accept(e);
    }
}
Also used : ElasticsearchException(org.elasticsearch.ElasticsearchException) CancellableThreads(org.elasticsearch.common.util.CancellableThreads) ShardId(org.elasticsearch.index.shard.ShardId) TransportChannel(org.elasticsearch.transport.TransportChannel) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) ClusterService(org.elasticsearch.cluster.service.ClusterService) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) RecoveryEngineException(org.elasticsearch.index.engine.RecoveryEngineException) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) TranslogCorruptedException(org.elasticsearch.index.translog.TranslogCorruptedException) ElasticsearchTimeoutException(org.elasticsearch.ElasticsearchTimeoutException) ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ConnectTransportException(org.elasticsearch.transport.ConnectTransportException) Settings(org.elasticsearch.common.settings.Settings) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) Store(org.elasticsearch.index.store.Store) ChannelActionListener(org.elasticsearch.action.support.ChannelActionListener) ThreadPool(org.elasticsearch.threadpool.ThreadPool) TransportResponse(org.elasticsearch.transport.TransportResponse) TransportService(org.elasticsearch.transport.TransportService) ClusterStateObserver(org.elasticsearch.cluster.ClusterStateObserver) Nullable(javax.annotation.Nullable) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) RecoveryRef(org.elasticsearch.indices.recovery.RecoveriesCollection.RecoveryRef) IndexEventListener(org.elasticsearch.index.shard.IndexEventListener) IndexShard(org.elasticsearch.index.shard.IndexShard) UNASSIGNED_SEQ_NO(org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO) IOException(java.io.IOException) IllegalIndexShardStateException(org.elasticsearch.index.shard.IllegalIndexShardStateException) TransportRequestHandler(org.elasticsearch.transport.TransportRequestHandler) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Logger(org.apache.logging.log4j.Logger) TimeValue.timeValueMillis(io.crate.common.unit.TimeValue.timeValueMillis) StreamInput(org.elasticsearch.common.io.stream.StreamInput) TimeValue(io.crate.common.unit.TimeValue) Translog(org.elasticsearch.index.translog.Translog) TransportResponseHandler(org.elasticsearch.transport.TransportResponseHandler) SQLExceptions(io.crate.exceptions.SQLExceptions) LogManager(org.apache.logging.log4j.LogManager) TransportException(org.elasticsearch.transport.TransportException) RateLimiter(org.apache.lucene.store.RateLimiter) ActionListener(org.elasticsearch.action.ActionListener) MapperException(org.elasticsearch.index.mapper.MapperException)
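
The handleException consumer above classifies the unwrapped cause to decide between retrying and failing the recovery: "target not ready" exceptions, IndexNotFoundException among them, are retried, while a closed source shard or an unclassified error fails the recovery. A hedged sketch of that decision tree as a standalone classifier (RecoveryFailureClassifier is illustrative only and not part of crate; the real handler also retries DelayRecoveryException with its own delay):

import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.transport.ConnectTransportException;

final class RecoveryFailureClassifier {

    enum Outcome { RETRY, FAIL }

    // Mirrors the instanceof checks in handleException: the target index or
    // shard not being ready yet, or a transient networking error, is worth a
    // retry; anything else (e.g. a closed source shard) fails the recovery.
    static Outcome classify(Throwable cause) {
        if (cause instanceof IllegalIndexShardStateException
            || cause instanceof IndexNotFoundException
            || cause instanceof ShardNotFoundException
            || cause instanceof ConnectTransportException) {
            return Outcome.RETRY;
        }
        return Outcome.FAIL;
    }
}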

Aggregations

IndexNotFoundException (org.elasticsearch.index.IndexNotFoundException) 92
ClusterState (org.elasticsearch.cluster.ClusterState) 22
ShardNotFoundException (org.elasticsearch.index.shard.ShardNotFoundException) 21
ShardId (org.elasticsearch.index.shard.ShardId) 19
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting) 16
Index (org.elasticsearch.index.Index) 16
Map (java.util.Map) 15
ArrayList (java.util.ArrayList) 14
IOException (java.io.IOException) 13
IndexMetadata (org.elasticsearch.cluster.metadata.IndexMetadata) 12
List (java.util.List) 11
IndicesOptions (org.elasticsearch.action.support.IndicesOptions) 11
ClusterName (org.elasticsearch.cluster.ClusterName) 9
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode) 9
Settings (org.elasticsearch.common.settings.Settings) 9
Matchers.containsString (org.hamcrest.Matchers.containsString) 9
HashMap (java.util.HashMap) 8
Nullable (javax.annotation.Nullable) 8
RoutingNode (org.elasticsearch.cluster.routing.RoutingNode) 8
RoutingNodes (org.elasticsearch.cluster.routing.RoutingNodes) 8