Search in sources :

Example 21 with ClusterStateUpdateTask

use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.

In class SnapshotsService, method deleteSnapshot:

/**
 * Deletes snapshot from repository.
 * <p>
 * If the snapshot is still running cancels the snapshot first and then deletes it from the repository.
 *
 * @param snapshot snapshot
 * @param listener listener notified when the deletion completes or fails
 * @param repositoryStateId the unique id for the state of the repository
 * @param immediatePriority if true the cluster state update task is submitted with
 *                          {@code Priority.IMMEDIATE} instead of {@code Priority.NORMAL}
 */
private void deleteSnapshot(final Snapshot snapshot, final ActionListener<Void> listener, final long repositoryStateId, final boolean immediatePriority) {
    LOGGER.info("deleting snapshot [{}]", snapshot);
    Priority priority = immediatePriority ? Priority.IMMEDIATE : Priority.NORMAL;
    clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask(priority) {

        // Set in execute() when the snapshot is found still running; read in
        // clusterStateProcessed() to decide between waiting for completion and
        // deleting the repository files right away.
        boolean waitForSnapshot = false;

        @Override
        public ClusterState execute(ClusterState currentState) {
            // Only one deletion may be in flight at a time.
            SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
            if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
                throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted in [" + deletionsInProgress + "]");
            }
            // Refuse to delete a snapshot that is currently being restored.
            RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
            if (restoreInProgress != null) {
                for (RestoreInProgress.Entry entry : restoreInProgress) {
                    if (entry.snapshot().equals(snapshot)) {
                        throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete snapshot during a restore in progress in [" + restoreInProgress + "]");
                    }
                }
            }
            ClusterState.Builder clusterStateBuilder = ClusterState.builder(currentState);
            SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
            SnapshotsInProgress.Entry snapshotEntry = snapshots != null ? snapshots.snapshot(snapshot) : null;
            if (snapshotEntry == null) {
                // This snapshot is not running - delete
                if (snapshots != null && !snapshots.entries().isEmpty()) {
                    // However other snapshots are running - cannot continue
                    throw new ConcurrentSnapshotExecutionException(snapshot, "another snapshot is currently running cannot delete");
                }
                // add the snapshot deletion to the cluster state
                SnapshotDeletionsInProgress.Entry entry = new SnapshotDeletionsInProgress.Entry(snapshot, threadPool.absoluteTimeInMillis(), repositoryStateId);
                if (deletionsInProgress != null) {
                    deletionsInProgress = deletionsInProgress.withAddedEntry(entry);
                } else {
                    deletionsInProgress = SnapshotDeletionsInProgress.newInstance(entry);
                }
                clusterStateBuilder.putCustom(SnapshotDeletionsInProgress.TYPE, deletionsInProgress);
            } else {
                // This snapshot is currently running - stopping shards first
                waitForSnapshot = true;
                final ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards;
                final State state = snapshotEntry.state();
                final String failure;
                if (state == State.INIT) {
                    // snapshot is still initializing, mark it as aborted
                    shards = snapshotEntry.shards();
                    assert shards.isEmpty();
                    failure = "Snapshot was aborted during initialization";
                } else if (state == State.STARTED) {
                    // snapshot is started - mark every non completed shard as aborted
                    final ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shardsBuilder = ImmutableOpenMap.builder();
                    for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : snapshotEntry.shards()) {
                        ShardSnapshotStatus status = shardEntry.value;
                        if (status.state().completed() == false) {
                            // Completed shards keep their status; everything else is flipped to ABORTED.
                            status = new ShardSnapshotStatus(status.nodeId(), ShardState.ABORTED, "aborted by snapshot deletion", status.generation());
                        }
                        shardsBuilder.put(shardEntry.key, status);
                    }
                    shards = shardsBuilder.build();
                    failure = "Snapshot was aborted by deletion";
                } else {
                    // Snapshot is in a terminal/finalizing state.
                    boolean hasUncompletedShards = false;
                    // Cleanup in case a node gone missing and snapshot wasn't updated for some reason
                    for (ObjectCursor<ShardSnapshotStatus> shardStatus : snapshotEntry.shards().values()) {
                        // Check if we still have shard running on existing nodes
                        if (shardStatus.value.state().completed() == false && shardStatus.value.nodeId() != null && currentState.nodes().get(shardStatus.value.nodeId()) != null) {
                            hasUncompletedShards = true;
                            break;
                        }
                    }
                    if (hasUncompletedShards) {
                        // snapshot is being finalized - wait for shards to complete finalization process
                        LOGGER.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes");
                        // Returning the unchanged state keeps waitForSnapshot == true, so we
                        // wait for the completion listener instead of deleting immediately.
                        return currentState;
                    } else {
                        // no shards to wait for but a node is gone - this is the only case
                        // where we force to finish the snapshot
                        LOGGER.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately");
                        shards = snapshotEntry.shards();
                    }
                    failure = snapshotEntry.failure();
                }
                SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards, failure);
                clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(newSnapshot));
            }
            return clusterStateBuilder.build();
        }

        @Override
        public void onFailure(String source, Exception e) {
            listener.onFailure(e);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            if (waitForSnapshot) {
                // The snapshot was running and has been marked ABORTED; re-enter
                // deleteSnapshot (by-name overload) once it actually finishes.
                LOGGER.trace("adding snapshot completion listener to wait for deleted snapshot to finish");
                addListener(snapshot, ActionListener.wrap(snapshotInfo -> {
                    LOGGER.debug("deleted snapshot completed - deleting files");
                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
                        try {
                            deleteSnapshot(snapshot.getRepository(), snapshot.getSnapshotId().getName(), listener, true);
                        } catch (Exception ex) {
                            // NOTE(review): the listener is not notified on this path — the
                            // failure is only logged; confirm callers tolerate a silent drop.
                            LOGGER.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
                        }
                    });
                }, e -> {
                    // The aborted snapshot "failed" to complete (expected for aborts);
                    // still attempt to delete whatever was written.
                    LOGGER.warn("deleted snapshot failed - deleting files", e);
                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
                        try {
                            deleteSnapshot(snapshot.getRepository(), snapshot.getSnapshotId().getName(), listener, true);
                        } catch (SnapshotMissingException smex) {
                            LOGGER.info(() -> new ParameterizedMessage("Tried deleting in-progress snapshot [{}], but it could not be found after failing to abort.", smex.getSnapshotName()), e);
                            listener.onFailure(new SnapshotException(snapshot, "Tried deleting in-progress snapshot [" + smex.getSnapshotName() + "], but it " + "could not be found after failing to abort.", smex));
                        }
                    });
                }));
            } else {
                LOGGER.debug("deleted snapshot is not running - deleting files");
                deleteSnapshotFromRepository(snapshot, listener, repositoryStateId, newState.nodes().getMinNodeVersion());
            }
        }
    });
}
Also used : ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) ShardId(org.elasticsearch.index.shard.ShardId) ClusterState(org.elasticsearch.cluster.ClusterState) Priority(org.elasticsearch.common.Priority) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) FailedToCommitClusterStateException(org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException) RepositoryException(org.elasticsearch.repositories.RepositoryException) RepositoryMissingException(org.elasticsearch.repositories.RepositoryMissingException) NotMasterException(org.elasticsearch.cluster.NotMasterException) RestoreInProgress(org.elasticsearch.cluster.RestoreInProgress) ClusterState(org.elasticsearch.cluster.ClusterState) State(org.elasticsearch.cluster.SnapshotsInProgress.State) ShardState(org.elasticsearch.cluster.SnapshotsInProgress.ShardState) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) ShardSnapshotStatus(org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)

Example 22 with ClusterStateUpdateTask

use of org.elasticsearch.cluster.ClusterStateUpdateTask in project crate by crate.

In class CorruptedBlobStoreRepositoryIT, method testFindDanglingLatestGeneration:

/**
 * Verifies that a repository whose index-N blob was moved to a generation ahead of the one
 * recorded in RepositoryData is still usable: the pending generation is registered in the
 * cluster state, survives a full cluster restart, and subsequent snapshot deletion bumps
 * the generation again as expected.
 */
public void testFindDanglingLatestGeneration() throws Exception {
    Path repo = randomRepoPath();
    final String repoName = "test";
    logger.info("-->  creating repository at {}", repo.toAbsolutePath());
    execute("CREATE REPOSITORY test TYPE fs with (location=?, compress=false, chunk_size=?)", new Object[] { repo.toAbsolutePath().toString(), randomIntBetween(100, 1000) + ByteSizeUnit.BYTES.getSuffix() });
    execute("create table doc.test1(x integer)");
    execute("create table doc.test2(x integer)");
    logger.info("--> indexing some data");
    execute("insert into doc.test1 values(1),(2)");
    execute("insert into doc.test2 values(1),(2)");
    final String snapshot = "snapshot1";
    logger.info("--> creating snapshot");
    CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot(repoName, snapshot).setWaitForCompletion(true).setIndices("test*").get();
    // Snapshot must have succeeded on every shard before we start corrupting the repo.
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    final Repository repository = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
    logger.info("--> move index-N blob to next generation");
    final RepositoryData repositoryData = getRepositoryData(repository);
    final long beforeMoveGen = repositoryData.getGenId();
    // Simulate the "dangling" condition: the on-disk index-N blob is one generation
    // ahead of what the repository metadata in the cluster state says.
    Files.move(repo.resolve("index-" + beforeMoveGen), repo.resolve("index-" + (beforeMoveGen + 1)));
    logger.info("--> set next generation as pending in the cluster state");
    final PlainActionFuture<Void> csUpdateFuture = PlainActionFuture.newFuture();
    internalCluster().getCurrentMasterNodeInstance(ClusterService.class).submitStateUpdateTask("set pending generation", new ClusterStateUpdateTask() {

        @Override
        public ClusterState execute(ClusterState currentState) {
            // Record (beforeMoveGen -> beforeMoveGen + 1) as a pending generation update
            // in the RepositoriesMetadata custom of the cluster state metadata.
            return ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1)).build()).build();
        }

        @Override
        public void onFailure(String source, Exception e) {
            csUpdateFuture.onFailure(e);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            csUpdateFuture.onResponse(null);
        }
    });
    // Block until the cluster state update has been applied (or rethrow its failure).
    csUpdateFuture.get();
    logger.info("--> full cluster restart");
    internalCluster().fullRestart();
    ensureGreen();
    Repository repositoryAfterRestart = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
    logger.info("--> verify index-N blob is found at the new location");
    assertThat(getRepositoryData(repositoryAfterRestart).getGenId(), is(beforeMoveGen + 1));
    logger.info("--> delete snapshot");
    execute("drop snapshot test.snapshot1");
    logger.info("--> verify index-N blob is found at the expected location");
    // Deleting the snapshot writes a new index-N blob, i.e. one more generation.
    assertThat(getRepositoryData(repositoryAfterRestart).getGenId(), is(beforeMoveGen + 2));
    logger.info("--> make sure snapshot doesn't exist");
    expectThrows(SnapshotMissingException.class, () -> client().admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get().getSnapshots());
}
Also used : Path(java.nio.file.Path) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Matchers.containsString(org.hamcrest.Matchers.containsString) RepositoryException(org.elasticsearch.repositories.RepositoryException) RepositoryData(org.elasticsearch.repositories.RepositoryData) RepositoriesMetadata(org.elasticsearch.cluster.metadata.RepositoriesMetadata) Repository(org.elasticsearch.repositories.Repository) BlobStoreRepository(org.elasticsearch.repositories.blobstore.BlobStoreRepository) ClusterService(org.elasticsearch.cluster.service.ClusterService) CreateSnapshotResponse(org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse) RepositoriesService(org.elasticsearch.repositories.RepositoriesService)

Example 23 with ClusterStateUpdateTask

use of org.elasticsearch.cluster.ClusterStateUpdateTask in project elasticsearch by elastic.

In class RoutingService, method performReroute:

// visible for testing
/**
 * Submits a HIGH-priority cluster state update that reroutes the routing table.
 * Concurrent calls are collapsed: while one reroute task is pending, further
 * requests are ignored (tracked via the {@code rerouting} flag).
 */
protected void performReroute(String reason) {
    try {
        // Nothing to do once the service has been stopped.
        if (lifecycle.stopped()) {
            return;
        }
        // Collapse concurrent requests: only one reroute task may be in flight at a time.
        if (!rerouting.compareAndSet(false, true)) {
            logger.trace("already has pending reroute, ignoring {}", reason);
            return;
        }
        logger.trace("rerouting {}", reason);
        clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) {

            @Override
            public ClusterState execute(ClusterState currentState) {
                // Clear the flag before rerouting so requests arriving during the
                // reroute computation schedule a fresh task.
                rerouting.set(false);
                return allocationService.reroute(currentState, reason);
            }

            @Override
            public void onFailure(String source, Exception e) {
                rerouting.set(false);
                final ClusterState clusterState = clusterService.state();
                if (logger.isTraceEnabled()) {
                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, clusterState), e);
                } else {
                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, clusterState.version()), e);
                }
            }

            @Override
            public void onNoLongerMaster(String source) {
                rerouting.set(false);
            // no biggie
            }
        });
    } catch (Exception e) {
        // Submission itself failed: release the flag and log with the full state.
        rerouting.set(false);
        final ClusterState clusterState = clusterService.state();
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", clusterState), e);
    }
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)

Example 24 with ClusterStateUpdateTask

use of org.elasticsearch.cluster.ClusterStateUpdateTask in project elasticsearch by elastic.

In class TribeIT, method updateMetaData:

/**
 * Applies {@code addCustoms} to the master node's metadata via an IMMEDIATE-priority
 * cluster state update and blocks (up to one minute) until the new state is published.
 * Fails the test if the update is rejected, interrupted, or times out.
 */
private static void updateMetaData(InternalTestCluster cluster, UnaryOperator<MetaData.Builder> addCustoms) {
    final ClusterService clusterService = cluster.getInstance(ClusterService.class, cluster.getMasterName());
    final CountDownLatch published = new CountDownLatch(1);
    clusterService.submitStateUpdateTask("update customMetaData", new ClusterStateUpdateTask(Priority.IMMEDIATE) {

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            // Let the caller mutate a builder seeded from the current metadata.
            final MetaData.Builder builder = addCustoms.apply(MetaData.builder(currentState.metaData()));
            return ClusterState.builder(currentState).metaData(builder).build();
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            published.countDown();
        }

        @Override
        public void onFailure(String source, Exception e) {
            fail("failed to apply cluster state from [" + source + "] with " + e.getMessage());
        }
    });
    try {
        published.await(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        fail("latch waiting on publishing custom md interrupted [" + e.getMessage() + "]");
    }
    // A non-zero count here means await() returned by timeout, not by countDown().
    assertThat("timed out trying to add custom metadata to " + cluster.getClusterName(), published.getCount(), equalTo(0L));
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) ClusterService(org.elasticsearch.cluster.service.ClusterService) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Matchers.containsString(org.hamcrest.Matchers.containsString) CountDownLatch(java.util.concurrent.CountDownLatch) MasterNotDiscoveredException(org.elasticsearch.discovery.MasterNotDiscoveredException) ClusterBlockException(org.elasticsearch.cluster.block.ClusterBlockException) IOException(java.io.IOException)

Example 25 with ClusterStateUpdateTask

use of org.elasticsearch.cluster.ClusterStateUpdateTask in project elasticsearch by elastic.

In class DiscoveryWithServiceDisruptionsIT, method testStaleMasterNotHijackingMajority:

/**
     * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes
     * that already are following another elected master node. These nodes should reject this cluster state and prevent
     * them from following the stale master.
     */
@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE")
public void testStaleMasterNotHijackingMajority() throws Exception {
    // 3 node cluster with unicast discovery and minimum_master_nodes set to 2:
    final List<String> nodes = startCluster(3, 2);
    // Save the current master node as old master node, because that node will get frozen
    final String oldMasterNode = internalCluster().getMasterName();
    for (String node : nodes) {
        ensureStableCluster(3, node);
    }
    assertMaster(oldMasterNode, nodes);
    // Simulating a painful gc by suspending all threads for a long time on the current elected master node.
    SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode);
    // Save the majority side
    final List<String> majoritySide = new ArrayList<>(nodes);
    majoritySide.remove(oldMasterNode);
    // Keeps track of the previous and current master when a master node transition took place on each node on the majority side:
    final Map<String, List<Tuple<String, String>>> masters = Collections.synchronizedMap(new HashMap<String, List<Tuple<String, String>>>());
    for (final String node : majoritySide) {
        masters.put(node, new ArrayList<Tuple<String, String>>());
        internalCluster().getInstance(ClusterService.class, node).addListener(event -> {
            DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
            DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
            if (!Objects.equals(previousMaster, currentMaster)) {
                logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), event.previousState());
                String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
                String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
                masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
            }
        });
    }
    final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1);
    internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> {
        if (event.state().nodes().getMasterNodeId() == null) {
            oldMasterNodeSteppedDown.countDown();
        }
    });
    internalCluster().setDisruptionScheme(masterNodeDisruption);
    logger.info("freezing node [{}]", oldMasterNode);
    masterNodeDisruption.startDisrupting();
    // Wait for the majority side to get stable
    assertDifferentMaster(majoritySide.get(0), oldMasterNode);
    assertDifferentMaster(majoritySide.get(1), oldMasterNode);
    // the test is periodically tripping on the following assertion. To find out which threads are blocking the nodes from making
    // progress we print a stack dump
    boolean failed = true;
    try {
        assertDiscoveryCompleted(majoritySide);
        failed = false;
    } finally {
        if (failed) {
            logger.error("discovery failed to complete, probably caused by a blocked thread: {}", new HotThreads().busiestThreads(Integer.MAX_VALUE).ignoreIdleThreads(false).detect());
        }
    }
    // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed,
    // but will be queued and once the old master node un-freezes it gets executed.
    // The old master node will send this update + the cluster state where he is flagged as master to the other
    // nodes that follow the new master. These nodes should ignore this update.
    internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) {

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            return ClusterState.builder(currentState).build();
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure [{}]", source), e);
        }
    });
    // Save the new elected master node
    final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0));
    logger.info("new detected master node [{}]", newMasterNode);
    // Stop disruption
    logger.info("Unfreeze node [{}]", oldMasterNode);
    masterNodeDisruption.stopDisrupting();
    oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS);
    // Make sure that the end state is consistent on all nodes:
    assertDiscoveryCompleted(nodes);
    assertMaster(newMasterNode, nodes);
    assertThat(masters.size(), equalTo(2));
    // Each majority-side node should have seen exactly two transitions:
    //   1) (oldMasterNode -> null)  when the frozen master was dropped, and
    //   2) (null -> newMasterNode)  when the new master was elected.
    // FIX: the failure messages for the first transition previously described the
    // opposite of what the matchers assert (they claimed v1() should be [null] and
    // v2() should be [newMasterNode]); they now match the matchers.
    for (Map.Entry<String, List<Tuple<String, String>>> entry : masters.entrySet()) {
        String nodeName = entry.getKey();
        List<Tuple<String, String>> recordedMasterTransition = entry.getValue();
        assertThat("[" + nodeName + "] Each node should only record two master node transitions", recordedMasterTransition.size(), equalTo(2));
        assertThat("[" + nodeName + "] First transition's previous master should be [" + oldMasterNode + "]", recordedMasterTransition.get(0).v1(), equalTo(oldMasterNode));
        assertThat("[" + nodeName + "] First transition's current master should be [null]", recordedMasterTransition.get(0).v2(), nullValue());
        assertThat("[" + nodeName + "] Second transition's previous master should be [null]", recordedMasterTransition.get(1).v1(), nullValue());
        assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]", recordedMasterTransition.get(1).v2(), equalTo(newMasterNode));
    }
}
Also used : DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) Supplier(org.apache.logging.log4j.util.Supplier) ClusterState(org.elasticsearch.cluster.ClusterState) HotThreads(org.elasticsearch.monitor.jvm.HotThreads) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ElasticsearchException(org.elasticsearch.ElasticsearchException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) NoShardAvailableActionException(org.elasticsearch.action.NoShardAvailableActionException) ClusterService(org.elasticsearch.cluster.service.ClusterService) IntermittentLongGCDisruption(org.elasticsearch.test.disruption.IntermittentLongGCDisruption) LongGCDisruption(org.elasticsearch.test.disruption.LongGCDisruption) SingleNodeDisruption(org.elasticsearch.test.disruption.SingleNodeDisruption) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) Tuple(org.elasticsearch.common.collect.Tuple) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging)

Aggregations

ClusterState (org.elasticsearch.cluster.ClusterState)45 ClusterStateUpdateTask (org.elasticsearch.cluster.ClusterStateUpdateTask)45 CountDownLatch (java.util.concurrent.CountDownLatch)21 Matchers.containsString (org.hamcrest.Matchers.containsString)15 ArrayList (java.util.ArrayList)13 ClusterService (org.elasticsearch.cluster.service.ClusterService)13 IOException (java.io.IOException)12 BrokenBarrierException (java.util.concurrent.BrokenBarrierException)12 ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage)12 List (java.util.List)11 Logger (org.apache.logging.log4j.Logger)9 CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)8 SnapshotDeletionsInProgress (org.elasticsearch.cluster.SnapshotDeletionsInProgress)8 SnapshotsInProgress (org.elasticsearch.cluster.SnapshotsInProgress)8 FailedToCommitClusterStateException (org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException)8 Priority (org.elasticsearch.common.Priority)8 Settings (org.elasticsearch.common.settings.Settings)8 Set (java.util.Set)7 ImmutableOpenMap (org.elasticsearch.common.collect.ImmutableOpenMap)7 RepositoryMissingException (org.elasticsearch.repositories.RepositoryMissingException)7