
Example 16 with AbstractRunnable

use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.

the class AsyncBulkByScrollActionTests, method testThreadPoolRejectionsAbortRequest.

/**
     * Mimics a ThreadPool rejecting execution of the task.
     */
public void testThreadPoolRejectionsAbortRequest() throws Exception {
    testTask.rethrottle(1);
    setupClient(new TestThreadPool(getTestName()) {

        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            // While we're here we can check that the sleep made it through
            assertThat(delay.nanos(), greaterThan(0L));
            assertThat(delay.seconds(), lessThanOrEqualTo(10L));
            ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
            return null;
        }
    });
    ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null);
    simulateScrollResponse(new DummyAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response);
    ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
    assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
    assertThat(client.scrollsCleared, contains(scrollId));
    // When the task is rejected we don't increment the throttled timer
    assertEquals(timeValueMillis(0), testTask.getStatus().getThrottled());
}
Also used : UpdateResponse(org.elasticsearch.action.update.UpdateResponse) SearchResponse(org.elasticsearch.action.search.SearchResponse) DeleteResponse(org.elasticsearch.action.delete.DeleteResponse) BulkItemResponse(org.elasticsearch.action.bulk.BulkItemResponse) BulkResponse(org.elasticsearch.action.bulk.BulkResponse) ClearScrollResponse(org.elasticsearch.action.search.ClearScrollResponse) IndexResponse(org.elasticsearch.action.index.IndexResponse) InternalSearchResponse(org.elasticsearch.search.internal.InternalSearchResponse) ActionResponse(org.elasticsearch.action.ActionResponse) DocWriteResponse(org.elasticsearch.action.DocWriteResponse) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Matchers.containsString(org.hamcrest.Matchers.containsString) TestUtil.randomSimpleString(org.apache.lucene.util.TestUtil.randomSimpleString) TestThreadPool(org.elasticsearch.threadpool.TestThreadPool) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) ExecutionException(java.util.concurrent.ExecutionException) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException) TimeValue(org.elasticsearch.common.unit.TimeValue) ScheduledFuture(java.util.concurrent.ScheduledFuture)
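
The fake schedule() above never runs the command; it only calls onRejection, which is enough to drive the abort path because AbstractRunnable funnels rejections into its normal failure handling. A minimal sketch of that contract, as a simplified stand-in for (not a copy of) the Elasticsearch class:

// Illustrative stand-in for org.elasticsearch.common.util.concurrent.AbstractRunnable.
// Assumption: the real class routes onRejection into onFailure in a similar way, which
// is what lets the test cast the scheduled command and reject it directly.
abstract class SketchAbstractRunnable implements Runnable {

    /** Called when an executor refuses the task; by assumption it delegates to onFailure. */
    public void onRejection(Exception e) {
        onFailure(e);
    }

    /** Single failure hook, used for rejections and for exceptions thrown by doRun(). */
    public abstract void onFailure(Exception e);

    /** The actual work, free to throw checked exceptions. */
    protected abstract void doRun() throws Exception;

    @Override
    public final void run() {
        try {
            doRun();
        } catch (Exception e) {
            onFailure(e);
        }
    }
}

With this shape, the EsRejectedExecutionException handed to onRejection reaches the same listener that a runtime failure would, which is why the test can assert that the request is aborted, the scroll is cleared, and no throttled time is recorded.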

Example 17 with AbstractRunnable

use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.

the class WorkingBulkByScrollTaskTests, method testDelayNeverNegative.

public void testDelayNeverNegative() throws IOException {
    // Thread pool that returns a ScheduledFuture that claims to have a negative delay
    ThreadPool threadPool = new TestThreadPool("test") {

        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            return new ScheduledFuture<Void>() {

                @Override
                public long getDelay(TimeUnit unit) {
                    return -1;
                }

                @Override
                public int compareTo(Delayed o) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public boolean cancel(boolean mayInterruptIfRunning) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public boolean isCancelled() {
                    throw new UnsupportedOperationException();
                }

                @Override
                public boolean isDone() {
                    throw new UnsupportedOperationException();
                }

                @Override
                public Void get() throws InterruptedException, ExecutionException {
                    throw new UnsupportedOperationException();
                }

                @Override
                public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
    try {
        // Have the task use the thread pool to delay a task that does nothing
        task.delayPrepareBulkRequest(threadPool, timeValueSeconds(0), 1, new AbstractRunnable() {

            @Override
            protected void doRun() throws Exception {
            }

            @Override
            public void onFailure(Exception e) {
                throw new UnsupportedOperationException();
            }
        });
        // Even though the future returns a negative delay we just return 0 because the time is up.
        assertEquals(timeValueSeconds(0), task.getStatus().getThrottledUntil());
    } finally {
        threadPool.shutdown();
    }
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Delayed(java.util.concurrent.Delayed) ThreadPool(org.elasticsearch.threadpool.ThreadPool) TestThreadPool(org.elasticsearch.threadpool.TestThreadPool) TimeUnit(java.util.concurrent.TimeUnit) TimeValue(org.elasticsearch.common.unit.TimeValue) ScheduledFuture(java.util.concurrent.ScheduledFuture) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
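
The fake ScheduledFuture reports getDelay() == -1, and the assertion expects the task status to show zero remaining throttle. A hedched, hypothetical helper (not the actual WorkingBulkByScrollTask code) showing the clamping behaviour the test relies on:

import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Hypothetical helper, not the real task implementation: once a future's trigger time
// has passed, getDelay() can legitimately return a negative number, so the remaining
// throttle time is clamped to zero before being reported in the status.
final class ThrottleDelays {

    private ThrottleDelays() {}

    static long remainingThrottleNanos(Delayed scheduled) {
        long remaining = scheduled.getDelay(TimeUnit.NANOSECONDS);
        return Math.max(0L, remaining); // the fake future's -1 becomes 0
    }
}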

Example 18 with AbstractRunnable

use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.

the class AbstractAsyncBulkByScrollAction, method onScrollResponse.

/**
     * Process a scroll response.
     * @param lastBatchStartTime the time when the last batch started. Used to calculate the throttling delay.
     * @param lastBatchSize the size of the last batch. Used to calculate the throttling delay.
     * @param response the scroll response to process
     */
void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, ScrollableHitSource.Response response) {
    if (task.isCancelled()) {
        finishHim(null);
        return;
    }
    // If any of the shards failed, that should abort the request. Timeouts aren't shard
    // failures, but we still need to pass them back to the user.
    if (response.getFailures().size() > 0 || response.isTimedOut()) {
        refreshAndFinish(emptyList(), response.getFailures(), response.isTimedOut());
        return;
    }
    long total = response.getTotalHits();
    if (mainRequest.getSize() > 0) {
        total = min(total, mainRequest.getSize());
    }
    task.setTotal(total);
    AbstractRunnable prepareBulkRequestRunnable = new AbstractRunnable() {

        @Override
        protected void doRun() throws Exception {
            /*
                 * It is important that the batch start time be calculated from here, scroll response to scroll response. That way the time
                 * waiting on the scroll doesn't count against this batch in the throttle.
                 */
            prepareBulkRequest(timeValueNanos(System.nanoTime()), response);
        }

        @Override
        public void onFailure(Exception e) {
            finishHim(e);
        }
    };
    prepareBulkRequestRunnable = (AbstractRunnable) threadPool.getThreadContext().preserveContext(prepareBulkRequestRunnable);
    task.delayPrepareBulkRequest(threadPool, lastBatchStartTime, lastBatchSize, prepareBulkRequestRunnable);
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException)
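
The only subtle step above is wrapping prepareBulkRequestRunnable with preserveContext so that the delayed work later runs with the caller's thread context. A plain-ThreadLocal sketch of that pattern (this is not Elasticsearch's ThreadContext, just an illustration of the idea):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of the "preserve context" idea with a plain ThreadLocal: capture the caller's
// context when the runnable is wrapped, restore it on whichever thread eventually runs
// the delayed work, then put the worker's own value back.
final class ContextPreservingSketch {

    static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "");

    static Runnable preserve(Runnable delegate) {
        String captured = CONTEXT.get();              // captured at wrapping time
        return () -> {
            String previous = CONTEXT.get();
            CONTEXT.set(captured);                    // run under the caller's context
            try {
                delegate.run();
            } finally {
                CONTEXT.set(previous);                // restore the worker's own context
            }
        };
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        CONTEXT.set("request-headers");
        pool.schedule(preserve(() -> System.out.println(CONTEXT.get())), 100, TimeUnit.MILLISECONDS);
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.SECONDS);   // prints "request-headers"
    }
}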

Example 19 with AbstractRunnable

use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.

the class ClientScrollableHitSource, method searchWithRetry.

/**
     * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
     * rejected execution.
     *
     * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
     * @param onResponse consumes the response from the action
     */
private void searchWithRetry(Consumer<ActionListener<SearchResponse>> action, Consumer<SearchResponse> onResponse) {
    /*
         * RetryHelper is both an AbstractRunnable and an ActionListener<SearchResponse> - meaning that it both starts the search and
         * reacts to the results. The complexity is all in onFailure, which either adapts the failure to the "fail" listener or
         * retries the search. Since both AbstractRunnable and ActionListener define the onFailure method, it is called both for failures
         * to run the action (either while running or before starting) and for failures in the response from the action.
         */
    class RetryHelper extends AbstractRunnable implements ActionListener<SearchResponse> {

        private final Iterator<TimeValue> retries = backoffPolicy.iterator();

        /**
             * The runnable to run that retries in the same context as the original call.
             */
        private Runnable retryWithContext;

        private volatile int retryCount = 0;

        @Override
        protected void doRun() throws Exception {
            action.accept(this);
        }

        @Override
        public void onResponse(SearchResponse response) {
            onResponse.accept(response);
        }

        @Override
        public void onFailure(Exception e) {
            if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
                if (retries.hasNext()) {
                    retryCount += 1;
                    TimeValue delay = retries.next();
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
                    countSearchRetry.run();
                    threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext);
                } else {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("giving up on search because we retried [{}] times without success", retryCount), e);
                    fail.accept(e);
                }
            } else {
                logger.warn("giving up on search because it failed with a non-retryable exception", e);
                fail.accept(e);
            }
        }
    }
    RetryHelper helper = new RetryHelper();
    // Wrap the helper in a runnable that preserves the current context so we keep it on retry.
    helper.retryWithContext = threadPool.getThreadContext().preserveContext(helper);
    helper.run();
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) ActionListener(org.elasticsearch.action.ActionListener) Iterator(java.util.Iterator) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException) TimeValue(org.elasticsearch.common.unit.TimeValue) SearchResponse(org.elasticsearch.action.search.SearchResponse)
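
RetryHelper pulls its delays from backoffPolicy.iterator() and gives up when the iterator runs dry. An illustrative backoff source (not Elasticsearch's BackoffPolicy) with the same shape, a bounded iterator of exponentially growing delays:

import java.util.Iterator;
import java.util.NoSuchElementException;

// Illustrative backoff source, not Elasticsearch's BackoffPolicy: a bounded iterator of
// exponentially growing delays, which is what RetryHelper consumes via retries.hasNext()
// and retries.next().
final class ExponentialBackoffSketch implements Iterable<Long> {

    private final long initialDelayMillis;
    private final int maxRetries;

    ExponentialBackoffSketch(long initialDelayMillis, int maxRetries) {
        this.initialDelayMillis = initialDelayMillis;
        this.maxRetries = maxRetries;
    }

    @Override
    public Iterator<Long> iterator() {
        return new Iterator<Long>() {
            private int attempt = 0;

            @Override
            public boolean hasNext() {
                return attempt < maxRetries;             // hasNext() == false means "give up"
            }

            @Override
            public Long next() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                return initialDelayMillis << attempt++;  // e.g. 100, 200, 400, 800 ms
            }
        };
    }
}

Each rejected search consumes one delay; once the iterator is exhausted, onFailure falls through to fail.accept(e) exactly as in the method above.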

Example 20 with AbstractRunnable

use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.

the class IndicesClusterStateService, method deleteIndices.

/**
     * Deletes indices (with shard data).
     *
     * @param event cluster change event
     */
private void deleteIndices(final ClusterChangedEvent event) {
    final ClusterState previousState = event.previousState();
    final ClusterState state = event.state();
    final String localNodeId = state.nodes().getLocalNodeId();
    assert localNodeId != null;
    for (Index index : event.indicesDeleted()) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] cleaning index, no longer part of the metadata", index);
        }
        AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
        final IndexSettings indexSettings;
        if (indexService != null) {
            indexSettings = indexService.getIndexSettings();
            indicesService.removeIndex(index, DELETED, "index no longer part of the metadata");
        } else if (previousState.metaData().hasIndex(index.getName())) {
            // The deleted index was part of the previous cluster state, but not loaded on the local node
            final IndexMetaData metaData = previousState.metaData().index(index);
            indexSettings = new IndexSettings(metaData, settings);
            indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state);
        } else {
            // asserting that the previous cluster state is not initialized/recovered.
            assert previousState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
            final IndexMetaData metaData = indicesService.verifyIndexIsDeleted(index, event.state());
            if (metaData != null) {
                indexSettings = new IndexSettings(metaData, settings);
            } else {
                indexSettings = null;
            }
        }
        if (indexSettings != null) {
            threadPool.generic().execute(new AbstractRunnable() {

                @Override
                public void onFailure(Exception e) {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
                }

                @Override
                protected void doRun() throws Exception {
                    try {
                        // we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store
                        // to the master. If we can't acquire the locks here immediately there might be a shard of this index still
                        // holding on to the lock due to a "currently canceled recovery" or so. The shard will delete itself BEFORE the
                        // lock is released so it's guaranteed to be deleted by the time we get the lock
                        indicesService.processPendingDeletes(index, indexSettings, new TimeValue(30, TimeUnit.MINUTES));
                    } catch (LockObtainFailedException exc) {
                        logger.warn("[{}] failed to lock all shards for index - timed out after 30 seconds", index);
                    } catch (InterruptedException e) {
                        logger.warn("[{}] failed to lock all shards for index - interrupted", index);
                    }
                }
            });
        }
    }
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) ClusterState(org.elasticsearch.cluster.ClusterState) IndexSettings(org.elasticsearch.index.IndexSettings) Index(org.elasticsearch.index.Index) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) LockObtainFailedException(org.apache.lucene.store.LockObtainFailedException) IndexShardRelocatedException(org.elasticsearch.index.shard.IndexShardRelocatedException) RecoveryFailedException(org.elasticsearch.indices.recovery.RecoveryFailedException) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) IOException(java.io.IOException) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) TimeValue(org.elasticsearch.common.unit.TimeValue)
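
The dispatch to threadPool.generic() matters because processPendingDeletes blocks while waiting for shard locks, which must not happen on the cluster-state applier thread. A JDK-only sketch of the same shape, where the executor stands in for the generic pool, processPendingDeletes is a hypothetical placeholder, and TimeoutException stands in for Lucene's LockObtainFailedException:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// JDK-only sketch of the dispatch above: run the blocking cleanup on a background
// executor and map timeout vs. interruption to distinct log lines.
public final class PendingDeleteSketch {

    public static void main(String[] args) {
        ExecutorService generic = Executors.newCachedThreadPool();
        generic.execute(() -> {
            try {
                processPendingDeletes(30, TimeUnit.MINUTES);
            } catch (TimeoutException e) {
                // stands in for Lucene's LockObtainFailedException in the real code
                System.err.println("failed to lock all shards for index - timed out after 30 minutes");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                System.err.println("failed to lock all shards for index - interrupted");
            }
        });
        generic.shutdown();
    }

    // Placeholder: the real call blocks until every shard lock for the index is acquired,
    // then acknowledges the store deletion to the master.
    private static void processPendingDeletes(long timeout, TimeUnit unit)
            throws TimeoutException, InterruptedException {
        // no-op in this sketch
    }
}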

Aggregations

AbstractRunnable (org.elasticsearch.common.util.concurrent.AbstractRunnable): 33 uses
IOException (java.io.IOException): 19 uses
ExecutionException (java.util.concurrent.ExecutionException): 11 uses
ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 10 uses
BrokenBarrierException (java.util.concurrent.BrokenBarrierException): 9 uses
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 9 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 8 uses
CyclicBarrier (java.util.concurrent.CyclicBarrier): 8 uses
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8 uses
TimeValue (org.elasticsearch.common.unit.TimeValue): 8 uses
ElasticsearchException (org.elasticsearch.ElasticsearchException): 7 uses
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 6 uses
Supplier (org.apache.logging.log4j.util.Supplier): 6 uses
EsRejectedExecutionException (org.elasticsearch.common.util.concurrent.EsRejectedExecutionException): 6 uses
TestThreadPool (org.elasticsearch.threadpool.TestThreadPool): 5 uses
UnknownHostException (java.net.UnknownHostException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 4 uses
ClusterState (org.elasticsearch.cluster.ClusterState): 4 uses
NotMasterException (org.elasticsearch.cluster.NotMasterException): 4 uses