
Example 1 with Priority

Use of org.elasticsearch.common.Priority in project elasticsearch by elastic.

From the class ClusterServiceTests, method testPrioritizedTasks.

/**
 * Note, this test can only work as long as we have a single thread executor executing the state update tasks!
 */
public void testPrioritizedTasks() throws Exception {
    BlockingTask block = new BlockingTask(Priority.IMMEDIATE);
    clusterService.submitStateUpdateTask("test", block);
    int taskCount = randomIntBetween(5, 20);
    // will hold all the tasks in the order in which they were executed
    List<PrioritizedTask> tasks = new ArrayList<>(taskCount);
    CountDownLatch latch = new CountDownLatch(taskCount);
    for (int i = 0; i < taskCount; i++) {
        Priority priority = randomFrom(Priority.values());
        clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks));
    }
    block.close();
    latch.await();
    Priority prevPriority = null;
    for (PrioritizedTask task : tasks) {
        if (prevPriority == null) {
            prevPriority = task.priority();
        } else {
            assertThat(task.priority().sameOrAfter(prevPriority), is(true));
        }
    }
}
Also used : Priority(org.elasticsearch.common.Priority) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch)
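
For readers unfamiliar with the ordering contract this test depends on, here is a minimal sketch (the class name PriorityOrderingSketch is illustrative, not part of the project): IMMEDIATE is the highest priority, and sameOrAfter() is the non-strict variant of the after() check used in Example 2.

import org.elasticsearch.common.Priority;

// Illustrative sketch of the comparison methods the tests rely on.
public class PriorityOrderingSketch {
    public static void main(String[] args) {
        // NORMAL work is scheduled after IMMEDIATE work, so after() is true here...
        System.out.println(Priority.NORMAL.after(Priority.IMMEDIATE));    // true
        // ...and false the other way around.
        System.out.println(Priority.IMMEDIATE.after(Priority.NORMAL));    // false
        // sameOrAfter() also accepts an equal priority, which is what the test asserts.
        System.out.println(Priority.NORMAL.sameOrAfter(Priority.NORMAL)); // true
    }
}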

Example 2 with Priority

Use of org.elasticsearch.common.Priority in project elasticsearch by elastic.

From the class PrioritizedExecutorsTests, method testPriorityQueue.

public void testPriorityQueue() throws Exception {
    PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>();
    List<Priority> priorities = Arrays.asList(Priority.values());
    Collections.shuffle(priorities, random());
    for (Priority priority : priorities) {
        queue.add(priority);
    }
    Priority prevPriority = null;
    while (!queue.isEmpty()) {
        if (prevPriority == null) {
            prevPriority = queue.poll();
        } else {
            assertThat(queue.poll().after(prevPriority), is(true));
        }
    }
}
Also used : Priority(org.elasticsearch.common.Priority) PriorityBlockingQueue(java.util.concurrent.PriorityBlockingQueue)
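
The test works only because Priority's natural ordering puts the highest priority at the head of the queue; a short usage sketch of the same idea (PriorityQueueSketch is an illustrative name):

import java.util.concurrent.PriorityBlockingQueue;
import org.elasticsearch.common.Priority;

// Illustrative sketch: a plain PriorityBlockingQueue drains Priority values highest-first.
public class PriorityQueueSketch {
    public static void main(String[] args) {
        PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>();
        queue.add(Priority.NORMAL);
        queue.add(Priority.IMMEDIATE);
        // IMMEDIATE sorts ahead of NORMAL, so it is polled first.
        System.out.println(queue.poll()); // IMMEDIATE
        System.out.println(queue.poll()); // NORMAL
    }
}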

Example 3 with Priority

Use of org.elasticsearch.common.Priority in project elasticsearch by elastic.

From the class SnapshotsService, method deleteSnapshot.

/**
 * Deletes snapshot from repository.
 * <p>
 * If the snapshot is still running, cancels the snapshot first and then deletes it from the repository.
 *
 * @param snapshot snapshot
 * @param listener listener
 * @param repositoryStateId the unique id for the state of the repository
 * @param immediatePriority if true, submit the delete cluster state update at IMMEDIATE priority
 */
private void deleteSnapshot(final Snapshot snapshot, final DeleteSnapshotListener listener, final long repositoryStateId, final boolean immediatePriority) {
    Priority priority = immediatePriority ? Priority.IMMEDIATE : Priority.NORMAL;
    clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask(priority) {

        boolean waitForSnapshot = false;

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
            if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
                throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted");
            }
            RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
            if (restoreInProgress != null) {
                // don't allow snapshot deletions while a restore is taking place,
                // otherwise we could end up deleting a snapshot that is being restored
                // and the files the restore depends on would all be gone
                if (restoreInProgress.entries().isEmpty() == false) {
                    throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete snapshot during a restore");
                }
            }
            ClusterState.Builder clusterStateBuilder = ClusterState.builder(currentState);
            SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
            SnapshotsInProgress.Entry snapshotEntry = snapshots != null ? snapshots.snapshot(snapshot) : null;
            if (snapshotEntry == null) {
                // This snapshot is not running - delete
                if (snapshots != null && !snapshots.entries().isEmpty()) {
                    // However other snapshots are running - cannot continue
                    throw new ConcurrentSnapshotExecutionException(snapshot, "another snapshot is currently running cannot delete");
                }
                // add the snapshot deletion to the cluster state
                SnapshotDeletionsInProgress.Entry entry = new SnapshotDeletionsInProgress.Entry(snapshot, System.currentTimeMillis(), repositoryStateId);
                if (deletionsInProgress != null) {
                    deletionsInProgress = deletionsInProgress.withAddedEntry(entry);
                } else {
                    deletionsInProgress = SnapshotDeletionsInProgress.newInstance(entry);
                }
                clusterStateBuilder.putCustom(SnapshotDeletionsInProgress.TYPE, deletionsInProgress);
            } else {
                // This snapshot is currently running - stopping shards first
                waitForSnapshot = true;
                ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards;
                if (snapshotEntry.state() == State.STARTED && snapshotEntry.shards() != null) {
                    // snapshot is currently running - stop started shards
                    ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shardsBuilder = ImmutableOpenMap.builder();
                    for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : snapshotEntry.shards()) {
                        ShardSnapshotStatus status = shardEntry.value;
                        if (!status.state().completed()) {
                            shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED));
                        } else {
                            shardsBuilder.put(shardEntry.key, status);
                        }
                    }
                    shards = shardsBuilder.build();
                } else if (snapshotEntry.state() == State.INIT) {
                    // snapshot hasn't started yet - end it
                    shards = snapshotEntry.shards();
                    endSnapshot(snapshotEntry);
                } else {
                    boolean hasUncompletedShards = false;
                    // Cleanup in case a node has gone missing and the snapshot wasn't updated for some reason
                    for (ObjectCursor<ShardSnapshotStatus> shardStatus : snapshotEntry.shards().values()) {
                        // Check if we still have shard running on existing nodes
                        if (shardStatus.value.state().completed() == false && shardStatus.value.nodeId() != null && currentState.nodes().get(shardStatus.value.nodeId()) != null) {
                            hasUncompletedShards = true;
                            break;
                        }
                    }
                    if (hasUncompletedShards) {
                        // snapshot is being finalized - wait for shards to complete finalization process
                        logger.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes");
                        return currentState;
                    } else {
                        // no shards to wait for - finish the snapshot
                        logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately");
                        shards = snapshotEntry.shards();
                        endSnapshot(snapshotEntry);
                    }
                }
                SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards);
                snapshots = new SnapshotsInProgress(newSnapshot);
                clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, snapshots);
            }
            return clusterStateBuilder.build();
        }

        @Override
        public void onFailure(String source, Exception e) {
            listener.onFailure(e);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            if (waitForSnapshot) {
                logger.trace("adding snapshot completion listener to wait for deleted snapshot to finish");
                addListener(new SnapshotCompletionListener() {

                    @Override
                    public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapshotInfo) {
                        if (completedSnapshot.equals(snapshot)) {
                            logger.trace("deleted snapshot completed - deleting files");
                            removeListener(this);
                            threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), listener, true));
                        }
                    }

                    @Override
                    public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) {
                        if (failedSnapshot.equals(snapshot)) {
                            logger.trace("deleted snapshot failed - deleting files", e);
                            removeListener(this);
                            threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> deleteSnapshot(failedSnapshot.getRepository(), failedSnapshot.getSnapshotId().getName(), listener, true));
                        }
                    }
                });
            } else {
                logger.trace("deleted snapshot is not running - deleting files");
                deleteSnapshotFromRepository(snapshot, listener, repositoryStateId);
            }
        }
    });
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) Priority(org.elasticsearch.common.Priority) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) RepositoryMissingException(org.elasticsearch.repositories.RepositoryMissingException) IOException(java.io.IOException) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) RestoreInProgress(org.elasticsearch.cluster.RestoreInProgress) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) ShardSnapshotStatus(org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus) IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) ObjectObjectCursor(com.carrotsearch.hppc.cursors.ObjectObjectCursor)
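
The priority handling above reduces to picking a Priority and passing it to the ClusterStateUpdateTask constructor. A stripped-down sketch of that pattern follows; the helper class PriorityTaskSketch and its method are hypothetical, and the ClusterService import location is assumed from the 5.x package layout:

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;

// Hypothetical helper showing how an immediatePriority flag maps to the task priority.
class PriorityTaskSketch {
    static void submitNoOp(ClusterService clusterService, boolean immediatePriority) {
        Priority priority = immediatePriority ? Priority.IMMEDIATE : Priority.NORMAL;
        clusterService.submitStateUpdateTask("no-op example", new ClusterStateUpdateTask(priority) {

            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState; // real tasks return a modified cluster state
            }

            @Override
            public void onFailure(String source, Exception e) {
                // a real task would notify its listener here
            }
        });
    }
}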

Example 4 with Priority

Use of org.elasticsearch.common.Priority in project elasticsearch by elastic.

From the class ClusterServiceTests, method testOneExecutorDontStarveAnother.

public void testOneExecutorDontStarveAnother() throws InterruptedException {
    final List<String> executionOrder = Collections.synchronizedList(new ArrayList<>());
    final Semaphore allowProcessing = new Semaphore(0);
    final Semaphore startedProcessing = new Semaphore(0);
    class TaskExecutor implements ClusterStateTaskExecutor<String> {

        @Override
        public ClusterTasksResult<String> execute(ClusterState currentState, List<String> tasks) throws Exception {
            // do this first, so startedProcessing can be used as a notification that this is done.
            executionOrder.addAll(tasks);
            startedProcessing.release(tasks.size());
            allowProcessing.acquire(tasks.size());
            return ClusterTasksResult.<String>builder().successes(tasks).build(ClusterState.builder(currentState).build());
        }
    }
    TaskExecutor executorA = new TaskExecutor();
    TaskExecutor executorB = new TaskExecutor();
    final ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.NORMAL);
    final ClusterStateTaskListener noopListener = (source, e) -> {
        throw new AssertionError(source, e);
    };
    // this blocks the cluster state queue, so we can set it up right
    clusterService.submitStateUpdateTask("0", "A0", config, executorA, noopListener);
    // wait to be processed
    startedProcessing.acquire(1);
    assertThat(executionOrder, equalTo(Arrays.asList("A0")));
    // these will be the first batch
    clusterService.submitStateUpdateTask("1", "A1", config, executorA, noopListener);
    clusterService.submitStateUpdateTask("2", "A2", config, executorA, noopListener);
    // release the first task (A0), but not the second batch
    allowProcessing.release(1);
    startedProcessing.acquire(2);
    assertThat(executionOrder, equalTo(Arrays.asList("A0", "A1", "A2")));
    // set up the queue with pending tasks for another executor at the same priority
    clusterService.submitStateUpdateTask("3", "B3", config, executorB, noopListener);
    clusterService.submitStateUpdateTask("4", "B4", config, executorB, noopListener);
    clusterService.submitStateUpdateTask("5", "A5", config, executorA, noopListener);
    clusterService.submitStateUpdateTask("6", "A6", config, executorA, noopListener);
    // now release the processing
    allowProcessing.release(6);
    // wait for last task to be processed
    startedProcessing.acquire(4);
    assertThat(executionOrder, equalTo(Arrays.asList("A0", "A1", "A2", "B3", "B4", "A5", "A6")));
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateTaskConfig(org.elasticsearch.cluster.ClusterStateTaskConfig) ClusterStateTaskExecutor(org.elasticsearch.cluster.ClusterStateTaskExecutor) ClusterStateTaskListener(org.elasticsearch.cluster.ClusterStateTaskListener) Priority(org.elasticsearch.common.Priority) Semaphore(java.util.concurrent.Semaphore) List(java.util.List) ArrayList(java.util.ArrayList) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Matchers.hasToString(org.hamcrest.Matchers.hasToString) Matchers.containsString(org.hamcrest.Matchers.containsString)
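
The only place the priority enters this test is the ClusterStateTaskConfig built near the top. A small sketch of that builder (TaskConfigSketch is an illustrative name; the URGENT constant and the two-argument build overload with a timeout are assumptions, not taken from the example above):

import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

// Illustrative only: building task configs at different priorities.
class TaskConfigSketch {
    static ClusterStateTaskConfig urgentConfig() {
        // same builder as the test, just with a higher (assumed URGENT) priority
        return ClusterStateTaskConfig.build(Priority.URGENT);
    }

    static ClusterStateTaskConfig normalConfigWithTimeout() {
        // assumed overload taking a timeout after which unprocessed tasks fail their listener
        return ClusterStateTaskConfig.build(Priority.NORMAL, TimeValue.timeValueSeconds(30));
    }
}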

Aggregations

Priority (org.elasticsearch.common.Priority) 4
ArrayList (java.util.ArrayList) 2
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList) 2
CountDownLatch (java.util.concurrent.CountDownLatch) 2
ClusterState (org.elasticsearch.cluster.ClusterState) 2
ClusterStateUpdateTask (org.elasticsearch.cluster.ClusterStateUpdateTask) 2
ObjectObjectCursor (com.carrotsearch.hppc.cursors.ObjectObjectCursor) 1
IOException (java.io.IOException) 1
Arrays (java.util.Arrays) 1
Collections (java.util.Collections) 1
Collections.emptyMap (java.util.Collections.emptyMap) 1
Collections.emptySet (java.util.Collections.emptySet) 1
HashMap (java.util.HashMap) 1
HashSet (java.util.HashSet) 1
List (java.util.List) 1
Map (java.util.Map) 1
Set (java.util.Set) 1
BrokenBarrierException (java.util.concurrent.BrokenBarrierException) 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 1
ConcurrentMap (java.util.concurrent.ConcurrentMap) 1