Use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.
From the class AsyncBulkByScrollActionTests, method testThreadPoolRejectionsAbortRequest.
/**
 * Mimics a ThreadPool rejecting execution of the task.
 */
public void testThreadPoolRejectionsAbortRequest() throws Exception {
    testTask.rethrottle(1);
    setupClient(new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            // While we're here we can check that the sleep made it through
            assertThat(delay.nanos(), greaterThan(0L));
            assertThat(delay.seconds(), lessThanOrEqualTo(10L));
            ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
            return null;
        }
    });
    ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null);
    simulateScrollResponse(new DummyAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response);
    ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
    assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
    assertThat(client.scrollsCleared, contains(scrollId));
    // When the task is rejected we don't increment the throttled timer
    assertEquals(timeValueMillis(0), testTask.getStatus().getThrottled());
}
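The cast to AbstractRunnable matters here: the test relies on AbstractRunnable's default rejection handling, in which onRejection(Exception) simply delegates to onFailure(Exception), so faking a rejection from the scheduler is enough to drive the abort path. A minimal illustrative sketch of that contract (not taken from the test suite):

    AbstractRunnable work = new AbstractRunnable() {
        @Override
        protected void doRun() throws Exception {
            // normal work would go here
        }

        @Override
        public void onFailure(Exception e) {
            // reached when doRun() throws and, by default, when the executor
            // rejects the task, because onRejection(e) calls onFailure(e)
        }
    };
    work.onRejection(new EsRejectedExecutionException("test")); // ends up in onFailure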
Use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.
From the class WorkingBulkByScrollTaskTests, method testDelayNeverNegative.
public void testDelayNeverNegative() throws IOException {
    // Thread pool that returns a ScheduledFuture that claims to have a negative delay
    ThreadPool threadPool = new TestThreadPool("test") {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            return new ScheduledFuture<Void>() {
                @Override
                public long getDelay(TimeUnit unit) {
                    return -1;
                }

                @Override
                public int compareTo(Delayed o) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public boolean cancel(boolean mayInterruptIfRunning) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public boolean isCancelled() {
                    throw new UnsupportedOperationException();
                }

                @Override
                public boolean isDone() {
                    throw new UnsupportedOperationException();
                }

                @Override
                public Void get() throws InterruptedException, ExecutionException {
                    throw new UnsupportedOperationException();
                }

                @Override
                public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
    try {
        // Have the task use the thread pool to delay a task that does nothing
        task.delayPrepareBulkRequest(threadPool, timeValueSeconds(0), 1, new AbstractRunnable() {
            @Override
            protected void doRun() throws Exception {
            }

            @Override
            public void onFailure(Exception e) {
                throw new UnsupportedOperationException();
            }
        });
        // Even though the future reports a negative delay, the status returns 0 because the time is up.
        assertEquals(timeValueSeconds(0), task.getStatus().getThrottledUntil());
    } finally {
        threadPool.shutdown();
    }
}
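The final assertion depends on the task clamping the reported delay at zero rather than surfacing the bogus negative value. A hedged sketch of that clamping; delayedFuture is an assumed name for the ScheduledFuture the task holds, not the exact production field:

    // Hedged sketch of the clamping under test (assumed names, simplified):
    long remainingNanos = delayedFuture.getDelay(TimeUnit.NANOSECONDS); // may be negative
    TimeValue throttledUntil = timeValueNanos(Math.max(0, remainingNanos)); // never negative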
Use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.
From the class AbstractAsyncBulkByScrollAction, method onScrollResponse.
/**
 * Process a scroll response.
 * @param lastBatchStartTime the time when the last batch started. Used to calculate the throttling delay.
 * @param lastBatchSize the size of the last batch. Used to calculate the throttling delay.
 * @param response the scroll response to process
 */
void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, ScrollableHitSource.Response response) {
    if (task.isCancelled()) {
        finishHim(null);
        return;
    }
    // If any of the shards failed that should abort the request.
    // Timeouts aren't shard failures but we still need to pass them back to the user.
    if (response.getFailures().size() > 0 || response.isTimedOut()) {
        refreshAndFinish(emptyList(), response.getFailures(), response.isTimedOut());
        return;
    }
    long total = response.getTotalHits();
    if (mainRequest.getSize() > 0) {
        total = min(total, mainRequest.getSize());
    }
    task.setTotal(total);
    AbstractRunnable prepareBulkRequestRunnable = new AbstractRunnable() {
        @Override
        protected void doRun() throws Exception {
            /*
             * It is important that the batch start time be calculated from here, scroll response to scroll response. That way the time
             * waiting on the scroll doesn't count against this batch in the throttle.
             */
            prepareBulkRequest(timeValueNanos(System.nanoTime()), response);
        }

        @Override
        public void onFailure(Exception e) {
            finishHim(e);
        }
    };
    prepareBulkRequestRunnable = (AbstractRunnable) threadPool.getThreadContext().preserveContext(prepareBulkRequestRunnable);
    task.delayPrepareBulkRequest(threadPool, lastBatchStartTime, lastBatchSize, prepareBulkRequestRunnable);
}
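task.delayPrepareBulkRequest schedules the prepared runnable after a throttling delay derived from the size and start time of the last batch. A hedged sketch of the arithmetic (assumed names; the real implementation also handles rethrottling and the unthrottled case):

    // At requestsPerSecond, a batch of lastBatchSize "earns" this much time...
    long targetNanos = (long) (lastBatchSize / requestsPerSecond * TimeUnit.SECONDS.toNanos(1));
    // ...minus whatever has already elapsed since the last batch started.
    long elapsedNanos = System.nanoTime() - lastBatchStartTime.nanos();
    TimeValue delay = timeValueNanos(Math.max(0, targetNanos - elapsedNanos));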
Use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.
From the class ClientScrollableHitSource, method searchWithRetry.
/**
 * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
 * rejected execution.
 *
 * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
 * @param onResponse consumes the response from the action
 */
private void searchWithRetry(Consumer<ActionListener<SearchResponse>> action, Consumer<SearchResponse> onResponse) {
    /*
     * RetryHelper is both an AbstractRunnable and an ActionListener<SearchResponse> - meaning that it both starts the search and
     * reacts to the results. The complexity is all in onFailure, which either adapts the failure to the "fail" listener or
     * retries the search. Since both AbstractRunnable and ActionListener define the onFailure method, it is called both on failure
     * to run the action (either while running or before starting) and on a failure response from the action.
     */
    class RetryHelper extends AbstractRunnable implements ActionListener<SearchResponse> {
        private final Iterator<TimeValue> retries = backoffPolicy.iterator();
        /**
         * The runnable to run that retries in the same context as the original call.
         */
        private Runnable retryWithContext;
        private volatile int retryCount = 0;

        @Override
        protected void doRun() throws Exception {
            action.accept(this);
        }

        @Override
        public void onResponse(SearchResponse response) {
            onResponse.accept(response);
        }

        @Override
        public void onFailure(Exception e) {
            if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
                if (retries.hasNext()) {
                    retryCount += 1;
                    TimeValue delay = retries.next();
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
                    countSearchRetry.run();
                    threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext);
                } else {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("giving up on search because we retried [{}] times without success", retryCount), e);
                    fail.accept(e);
                }
            } else {
                logger.warn("giving up on search because it failed with a non-retryable exception", e);
                fail.accept(e);
            }
        }
    }
    RetryHelper helper = new RetryHelper();
    // Wrap the helper in a runnable that preserves the current context so we keep it on retry.
    helper.retryWithContext = threadPool.getThreadContext().preserveContext(helper);
    helper.run();
}
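The retry cadence comes from backoffPolicy, which yields a finite sequence of delays; once its iterator runs dry, the helper gives up and fails. A hedged sketch of constructing and consuming such a policy (the initial delay and retry count are illustrative, not what reindex actually configures):

    // Finite exponential backoff: one delay per retry, then hasNext() turns false.
    BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(timeValueMillis(500), 8);
    Iterator<TimeValue> retries = backoffPolicy.iterator();
    while (retries.hasNext()) {
        TimeValue delay = retries.next(); // grows with each attempt
        // schedule the next retry after `delay`
    }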
Use of org.elasticsearch.common.util.concurrent.AbstractRunnable in project elasticsearch by elastic.
From the class IndicesClusterStateService, method deleteIndices.
/**
 * Deletes indices (with shard data).
 *
 * @param event cluster change event
 */
private void deleteIndices(final ClusterChangedEvent event) {
    final ClusterState previousState = event.previousState();
    final ClusterState state = event.state();
    final String localNodeId = state.nodes().getLocalNodeId();
    assert localNodeId != null;
    for (Index index : event.indicesDeleted()) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] cleaning index, no longer part of the metadata", index);
        }
        AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
        final IndexSettings indexSettings;
        if (indexService != null) {
            indexSettings = indexService.getIndexSettings();
            indicesService.removeIndex(index, DELETED, "index no longer part of the metadata");
        } else if (previousState.metaData().hasIndex(index.getName())) {
            // The deleted index was part of the previous cluster state, but not loaded on the local node
            final IndexMetaData metaData = previousState.metaData().index(index);
            indexSettings = new IndexSettings(metaData, settings);
            indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state);
        } else {
            // The previous cluster state should not yet be initialized/recovered.
            assert previousState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
            final IndexMetaData metaData = indicesService.verifyIndexIsDeleted(index, event.state());
            if (metaData != null) {
                indexSettings = new IndexSettings(metaData, settings);
            } else {
                indexSettings = null;
            }
        }
        if (indexSettings != null) {
            threadPool.generic().execute(new AbstractRunnable() {
                @Override
                public void onFailure(Exception e) {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
                }

                @Override
                protected void doRun() throws Exception {
                    try {
                        // We are waiting until we can lock the index / all shards on the node and then we ack the delete of the store
                        // to the master. If we can't acquire the locks here immediately there might be a shard of this index still
                        // holding on to the lock due to a "currently canceled recovery" or so. The shard will delete itself BEFORE the
                        // lock is released so it's guaranteed to be deleted by the time we get the lock.
                        indicesService.processPendingDeletes(index, indexSettings, new TimeValue(30, TimeUnit.MINUTES));
                    } catch (LockObtainFailedException exc) {
                        logger.warn("[{}] failed to lock all shards for index - timed out after 30 minutes", index);
                    } catch (InterruptedException e) {
                        logger.warn("[{}] failed to lock all shards for index - interrupted", index);
                    }
                }
            });
        }
    }
}
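Blocking, potentially long-running work like waiting on shard locks belongs on the generic thread pool, dispatched as an AbstractRunnable so that failures are logged instead of lost. A minimal template of the same dispatch pattern (the work and log message are placeholders):

    threadPool.generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            logger.warn("background work failed", e); // failures never vanish silently
        }

        @Override
        protected void doRun() throws Exception {
            // blocking or long-running work goes here; exceptions route to onFailure
        }
    });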