Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
The class AsyncBulkByScrollActionTests, method testCancelWhileDelayedAfterScrollResponse.
/**
 * Tests that we can cancel the request during its throttling delay. This can't use {@link #cancelTaskCase(Consumer)} because it needs
 * to send the request un-canceled and cancel it at a specific time.
 */
public void testCancelWhileDelayedAfterScrollResponse() throws Exception {
    String reason = randomSimpleString(random());
    /*
     * Replace the thread pool with one that will cancel the task as soon as anything is scheduled, which reindex tries to do when there
     * is a delay.
     */
    setupClient(new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            /*
             * This is called twice:
             * 1. To schedule the throttling. When that happens we immediately cancel the task.
             * 2. After the task is canceled.
             * Both times we delegate to the standard behavior so the task is scheduled as expected, so it can be cancelled and all
             * that good stuff.
             */
            if (delay.nanos() > 0) {
                generic().execute(() -> taskManager.cancel(testTask, reason, () -> {}));
            }
            return super.schedule(delay, name, command);
        }
    });
    // Send the scroll response which will trigger the custom thread pool above, canceling the request before running the response
    DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction();
    boolean previousScrollSet = usually();
    if (previousScrollSet) {
        action.setScroll(scrollId());
    }
    long total = randomIntBetween(0, Integer.MAX_VALUE);
    ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), total, emptyList(), null);
    // Use a long delay here so the test will time out if the cancellation doesn't reschedule the throttled task
    testTask.rethrottle(1);
    simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1000, response);
    // Now that we've got our cancel we'll just verify that it all came through all right
    assertEquals(reason, listener.get(10, TimeUnit.SECONDS).getReasonCancelled());
    if (previousScrollSet) {
        // Canceled tasks always start to clear the scroll before they die.
        assertThat(client.scrollsCleared, contains(scrollId));
    }
}
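For reference, here is a minimal sketch of the TimeValue API this test relies on: the static factories and unit accessors from org.elasticsearch.common.unit.TimeValue. The class name and the printed values are illustrative only, not part of the test above.

import org.elasticsearch.common.unit.TimeValue;

import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

public class TimeValueSketch {
    public static void main(String[] args) {
        // Factory methods create immutable durations in a given unit.
        TimeValue tenSeconds = timeValueSeconds(10);
        // The test above passes a value like this as the batch start time.
        TimeValue startTime = timeValueNanos(System.nanoTime());

        // Accessors convert to other units; nanos() is what the throttling math in these tests compares.
        long nanos = tenSeconds.nanos();   // 10_000_000_000
        long millis = tenSeconds.millis(); // 10_000

        // A TimeValue can also be built from an explicit TimeUnit.
        TimeValue fiveHundredMs = new TimeValue(500, TimeUnit.MILLISECONDS);

        System.out.println(tenSeconds + " = " + nanos + "ns = " + millis + "ms; also " + fiveHundredMs + " and " + startTime);
    }
}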
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
The class WorkingBulkByScrollTaskTests, method testDelayAndRethrottle.
/**
 * Furiously rethrottles a delayed request to make sure that we never run it twice.
 */
public void testDelayAndRethrottle() throws IOException, InterruptedException {
    List<Throwable> errors = new CopyOnWriteArrayList<>();
    AtomicBoolean done = new AtomicBoolean();
    int threads = between(1, 10);
    CyclicBarrier waitForShutdown = new CyclicBarrier(threads);
    /*
     * We never end up waiting this long because the test rethrottles over and over again, ratcheting down the delay a random amount
     * each time.
     */
    float originalRequestsPerSecond = (float) randomDoubleBetween(1, 10000, true);
    task.rethrottle(originalRequestsPerSecond);
    TimeValue maxDelay = timeValueSeconds(between(1, 5));
    assertThat(maxDelay.nanos(), greaterThanOrEqualTo(0L));
    int batchSizeForMaxDelay = (int) (maxDelay.seconds() * originalRequestsPerSecond);
    ThreadPool threadPool = new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos())));
            return super.schedule(delay, name, command);
        }
    };
    try {
        task.delayPrepareBulkRequest(threadPool, timeValueNanos(System.nanoTime()), batchSizeForMaxDelay, new AbstractRunnable() {
            @Override
            protected void doRun() throws Exception {
                boolean oldValue = done.getAndSet(true);
                if (oldValue) {
                    throw new RuntimeException("Ran twice oh no!");
                }
            }

            @Override
            public void onFailure(Exception e) {
                errors.add(e);
            }
        });
        // Rethrottle on a random number of threads, one of which is this thread.
        Runnable test = () -> {
            try {
                int rethrottles = 0;
                while (false == done.get()) {
                    float requestsPerSecond = (float) randomDoubleBetween(0, originalRequestsPerSecond * 2, true);
                    task.rethrottle(requestsPerSecond);
                    rethrottles += 1;
                }
                logger.info("Rethrottled [{}] times", rethrottles);
                waitForShutdown.await();
            } catch (Exception e) {
                errors.add(e);
            }
        };
        for (int i = 1; i < threads; i++) {
            threadPool.generic().execute(test);
        }
        test.run();
    } finally {
        // Other threads should finish up quickly as they are checking the same AtomicBoolean.
        threadPool.shutdown();
        threadPool.awaitTermination(10, TimeUnit.SECONDS);
    }
    assertThat(errors, empty());
}
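The batchSizeForMaxDelay line above inverts the throttling arithmetic: the delay before preparing the next bulk request is roughly lastBatchSize / requestsPerSecond. A self-contained sketch of that calculation follows; the throttleDelay helper is hypothetical and not part of WorkingBulkByScrollTask.

import org.elasticsearch.common.unit.TimeValue;

import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;

public class ThrottleMath {
    /**
     * Hypothetical helper: the delay to wait after a batch of lastBatchSize hits
     * when throttled to requestsPerSecond, expressed as a TimeValue.
     */
    static TimeValue throttleDelay(int lastBatchSize, float requestsPerSecond) {
        double delaySeconds = lastBatchSize / (double) requestsPerSecond;
        return timeValueNanos((long) (delaySeconds * 1_000_000_000L));
    }

    public static void main(String[] args) {
        // 100 hits at 25 requests per second gives a delay of about 4 seconds, which is
        // why the test sizes its batch as maxDelay.seconds() * originalRequestsPerSecond.
        TimeValue delay = throttleDelay(100, 25f);
        System.out.println(delay.seconds() + "s (" + delay.nanos() + "ns)");
    }
}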
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
The class BulkByScrollTaskTests, method testMergeStatuses.
public void testMergeStatuses() {
    BulkByScrollTask.StatusOrException[] statuses = new BulkByScrollTask.StatusOrException[between(2, 100)];
    boolean containsNullStatuses = randomBoolean();
    int mergedTotal = 0;
    int mergedUpdated = 0;
    int mergedCreated = 0;
    int mergedDeleted = 0;
    int mergedBatches = 0;
    int mergedVersionConflicts = 0;
    int mergedNoops = 0;
    int mergedBulkRetries = 0;
    int mergedSearchRetries = 0;
    TimeValue mergedThrottled = timeValueNanos(0);
    float mergedRequestsPerSecond = 0;
    TimeValue mergedThrottledUntil = timeValueNanos(Integer.MAX_VALUE);
    for (int i = 0; i < statuses.length; i++) {
        if (containsNullStatuses && rarely()) {
            continue;
        }
        int total = between(0, 10000);
        int updated = between(0, total);
        int created = between(0, total - updated);
        int deleted = between(0, total - updated - created);
        int batches = between(0, 10);
        int versionConflicts = between(0, 100);
        int noops = total - updated - created - deleted;
        int bulkRetries = between(0, 100);
        int searchRetries = between(0, 100);
        TimeValue throttled = timeValueNanos(between(0, 10000));
        float requestsPerSecond = randomValueOtherThanMany(r -> r <= 0, () -> randomFloat());
        String reasonCancelled = randomBoolean() ? null : "test";
        TimeValue throttledUntil = timeValueNanos(between(0, 1000));
        statuses[i] = new BulkByScrollTask.StatusOrException(new BulkByScrollTask.Status(i, total, updated, created, deleted,
                batches, versionConflicts, noops, bulkRetries, searchRetries, throttled, requestsPerSecond, reasonCancelled,
                throttledUntil));
        mergedTotal += total;
        mergedUpdated += updated;
        mergedCreated += created;
        mergedDeleted += deleted;
        mergedBatches += batches;
        mergedVersionConflicts += versionConflicts;
        mergedNoops += noops;
        mergedBulkRetries += bulkRetries;
        mergedSearchRetries += searchRetries;
        mergedThrottled = timeValueNanos(mergedThrottled.nanos() + throttled.nanos());
        mergedRequestsPerSecond += requestsPerSecond;
        mergedThrottledUntil = timeValueNanos(min(mergedThrottledUntil.nanos(), throttledUntil.nanos()));
    }
    String reasonCancelled = randomBoolean() ? randomAsciiOfLength(10) : null;
    BulkByScrollTask.Status merged = new BulkByScrollTask.Status(Arrays.asList(statuses), reasonCancelled);
    assertEquals(mergedTotal, merged.getTotal());
    assertEquals(mergedUpdated, merged.getUpdated());
    assertEquals(mergedCreated, merged.getCreated());
    assertEquals(mergedDeleted, merged.getDeleted());
    assertEquals(mergedBatches, merged.getBatches());
    assertEquals(mergedVersionConflicts, merged.getVersionConflicts());
    assertEquals(mergedNoops, merged.getNoops());
    assertEquals(mergedBulkRetries, merged.getBulkRetries());
    assertEquals(mergedSearchRetries, merged.getSearchRetries());
    assertEquals(mergedThrottled, merged.getThrottled());
    assertEquals(mergedRequestsPerSecond, merged.getRequestsPerSecond(), 0.0001f);
    assertEquals(mergedThrottledUntil, merged.getThrottledUntil());
    assertEquals(reasonCancelled, merged.getReasonCancelled());
}
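The merge loop above combines the TimeValue fields numerically: throttled durations are summed and throttledUntil takes the minimum, both through nanos(). Here is a stand-alone sketch of that pattern; the method names are illustrative and not the BulkByScrollTask.Status API.

import org.elasticsearch.common.unit.TimeValue;

import static java.lang.Math.min;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;

public class TimeValueMerging {
    // Illustrative only: sum two throttled durations, as the merged status does.
    static TimeValue sumThrottled(TimeValue a, TimeValue b) {
        return timeValueNanos(a.nanos() + b.nanos());
    }

    // Illustrative only: the merged "throttled until" is the soonest of the two.
    static TimeValue minThrottledUntil(TimeValue a, TimeValue b) {
        return timeValueNanos(min(a.nanos(), b.nanos()));
    }

    public static void main(String[] args) {
        System.out.println(sumThrottled(timeValueNanos(1500), timeValueNanos(500)).nanos());      // 2000
        System.out.println(minThrottledUntil(timeValueNanos(1500), timeValueNanos(500)).nanos()); // 500
    }
}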
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
The class BulkByScrollTaskTests, method checkStatusNegatives.
/**
 * Build a task status with only some values. Used for testing negative values.
 */
private void checkStatusNegatives(Integer sliceId, long total, long updated, long created, long deleted, int batches,
        long versionConflicts, long noops, long bulkRetries, long searchRetries, String fieldName) {
    TimeValue throttle = parseTimeValue(randomPositiveTimeValue(), "test");
    TimeValue throttledUntil = parseTimeValue(randomPositiveTimeValue(), "test");
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> new BulkByScrollTask.Status(sliceId, total, updated, created, deleted, batches, versionConflicts, noops,
                    bulkRetries, searchRetries, throttle, 0f, null, throttledUntil));
    assertEquals(e.getMessage(), fieldName + " must be greater than 0 but was [-1]");
}
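The throttle values above come from parseTimeValue, which converts a human-readable duration string into a TimeValue; the second argument only names the setting for error messages. Below is a minimal example, assuming the two-argument overload used in the snippet; the duration strings are arbitrary.

import org.elasticsearch.common.unit.TimeValue;

import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;

public class ParseTimeValueSketch {
    public static void main(String[] args) {
        // "test" names the setting being parsed and only appears in error messages.
        TimeValue tenSeconds = parseTimeValue("10s", "test");
        TimeValue fiveHundredMs = parseTimeValue("500ms", "test");

        System.out.println(tenSeconds.millis());    // 10000
        System.out.println(fiveHundredMs.millis()); // 500
    }
}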
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
The class BulkProcessorRetryIT, method executeBulkRejectionLoad.
private void executeBulkRejectionLoad(BackoffPolicy backoffPolicy, boolean rejectedExecutionExpected) throws Throwable {
    final CorrelatingBackoffPolicy internalPolicy = new CorrelatingBackoffPolicy(backoffPolicy);
    int numberOfAsyncOps = randomIntBetween(600, 700);
    final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps);
    final Set<Object> responses = Collections.newSetFromMap(new ConcurrentHashMap<>());
    assertAcked(prepareCreate(INDEX_NAME));
    ensureGreen();
    BulkProcessor bulkProcessor = BulkProcessor.builder(client(), new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            // no op
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            internalPolicy.logResponse(response);
            responses.add(response);
            latch.countDown();
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            responses.add(failure);
            latch.countDown();
        }
    }).setBulkActions(1).setConcurrentRequests(randomIntBetween(0, 100)).setBackoffPolicy(internalPolicy).build();
    indexDocs(bulkProcessor, numberOfAsyncOps);
    latch.await(10, TimeUnit.SECONDS);
    bulkProcessor.close();
    assertThat(responses.size(), equalTo(numberOfAsyncOps));
    // validate all responses
    for (Object response : responses) {
        if (response instanceof BulkResponse) {
            BulkResponse bulkResponse = (BulkResponse) response;
            for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) {
                if (bulkItemResponse.isFailed()) {
                    BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
                    Throwable rootCause = ExceptionsHelper.unwrapCause(failure.getCause());
                    if (rootCause instanceof EsRejectedExecutionException) {
                        if (rejectedExecutionExpected == false) {
                            Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse);
                            assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState);
                            if (backoffState.hasNext()) {
                                // the backoff policy would still have allowed retries, so a rejection here is a real failure
                                throw new AssertionError("Got rejected although backoff policy would allow more retries", rootCause);
                            } else {
                                logger.debug("We maxed out the number of bulk retries and got rejected (this is ok).");
                            }
                        }
                    } else {
                        throw new AssertionError("Unexpected failure", rootCause);
                    }
                }
            }
        } else {
            Throwable t = (Throwable) response;
            // we're not expecting any other errors
            throw new AssertionError("Unexpected failure", t);
        }
    }
    client().admin().indices().refresh(new RefreshRequest()).get();
    // validate we did not create any duplicates due to retries; it is ok if we lost some index operations
    // to rejected executions (which is possible even when backing off, although less likely)
    Matcher<Long> searchResultCount = lessThanOrEqualTo((long) numberOfAsyncOps);
    SearchResponse results = client().prepareSearch(INDEX_NAME)
            .setTypes(TYPE_NAME)
            .setQuery(QueryBuilders.matchAllQuery())
            .setSize(0)
            .get();
    assertThat(results.getHits().getTotalHits(), searchResultCount);
}
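The CorrelatingBackoffPolicy above wraps a standard BackoffPolicy, which is essentially a reusable factory for Iterator<TimeValue> retry delays. Here is a short sketch of the stock exponential policy and how a retry loop might consume those delays; the loop is illustrative and not BulkProcessor's internal retry handler.

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Iterator;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

public class BackoffSketch {
    public static void main(String[] args) {
        // Exponential backoff starting at 50ms with at most 8 retries; each call to iterator() starts fresh.
        BackoffPolicy policy = BackoffPolicy.exponentialBackoff(timeValueMillis(50), 8);

        Iterator<TimeValue> delays = policy.iterator();
        int attempt = 0;
        while (delays.hasNext()) {
            TimeValue delay = delays.next();
            attempt++;
            System.out.println("retry " + attempt + " after " + delay);
            // An illustrative retry loop would sleep for delay.millis() before re-sending the rejected bulk request.
        }
    }
}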