use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
the class GceInstancesServiceImpl method client.
public synchronized Compute client() {
    if (refreshInterval != null && refreshInterval.millis() != 0) {
        if (client != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
            if (logger.isTraceEnabled())
                logger.trace("using cache to retrieve client");
            return client;
        }
        lastRefresh = System.currentTimeMillis();
    }
    try {
        gceJsonFactory = new JacksonFactory();
        logger.info("starting GCE discovery service");
        // Forcing Google Token API URL as set in GCE SDK to
        //      http://metadata/computeMetadata/v1/instance/service-accounts/default/token
        // See https://developers.google.com/compute/docs/metadata#metadataserver
        String tokenServerEncodedUrl = GceMetadataService.GCE_HOST.get(settings) + "/computeMetadata/v1/instance/service-accounts/default/token";
        ComputeCredential credential = new ComputeCredential.Builder(getGceHttpTransport(), gceJsonFactory)
            .setTokenServerEncodedUrl(tokenServerEncodedUrl)
            .build();
        // hack around code messiness in GCE code
        // TODO: get this fixed
        Access.doPrivilegedIOException(credential::refreshToken);
        logger.debug("token [{}] will expire in [{}] s", credential.getAccessToken(), credential.getExpiresInSeconds());
        if (credential.getExpiresInSeconds() != null) {
            refreshInterval = TimeValue.timeValueSeconds(credential.getExpiresInSeconds() - 1);
        }
        Compute.Builder builder = new Compute.Builder(getGceHttpTransport(), gceJsonFactory, null)
            .setApplicationName(VERSION)
            .setRootUrl(GCE_ROOT_URL.get(settings));
        if (RETRY_SETTING.exists(settings)) {
            TimeValue maxWait = MAX_WAIT_SETTING.get(settings);
            RetryHttpInitializerWrapper retryHttpInitializerWrapper;
            if (maxWait.getMillis() > 0) {
                retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential, maxWait);
            } else {
                retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential);
            }
            builder.setHttpRequestInitializer(retryHttpInitializerWrapper);
        } else {
            builder.setHttpRequestInitializer(credential);
        }
        this.client = builder.build();
    } catch (Exception e) {
        logger.warn("unable to start GCE discovery service", e);
        throw new IllegalArgumentException("unable to start GCE discovery service", e);
    }
    return this.client;
}
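The TimeValue here drives a simple refresh cache: the Compute client is rebuilt only once the token-derived interval has elapsed. Below is a minimal, hedged sketch of that caching pattern in isolation; ExpensiveClient is a hypothetical stand-in for Compute, the 3600-second token lifetime is an assumption, and the shaded TimeValue class is assumed to be on the classpath.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class RefreshingClientCache {

    /** Hypothetical stand-in for the Google Compute client built above. */
    static class ExpensiveClient {
    }

    private ExpensiveClient client;
    private TimeValue refreshInterval;
    private long lastRefresh;

    public synchronized ExpensiveClient client() {
        if (refreshInterval != null && refreshInterval.millis() != 0) {
            boolean neverExpires = refreshInterval.millis() < 0;
            boolean stillFresh = (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis();
            if (client != null && (neverExpires || stillFresh)) {
                return client; // reuse the cached client
            }
            lastRefresh = System.currentTimeMillis();
        }
        client = new ExpensiveClient();
        // Mirror the GCE code: refresh one second before the (assumed) 3600 s token expiry.
        refreshInterval = TimeValue.timeValueSeconds(3600 - 1);
        return client;
    }
}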
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
the class AsyncBulkByScrollActionTests method bulkRetryTestCase.
/**
 * Execute a bulk retry test case. The total number of failures is random; the number of retries attempted is set to
 * testRequest.getMaxRetries and controlled by the failWithRejection parameter.
 */
private void bulkRetryTestCase(boolean failWithRejection) throws Exception {
    int totalFailures = randomIntBetween(1, testRequest.getMaxRetries());
    int size = randomIntBetween(1, 100);
    testRequest.setMaxRetries(totalFailures - (failWithRejection ? 1 : 0));
    client.bulksToReject = client.bulksAttempts.get() + totalFailures;
    /*
     * When we get a successful bulk response we usually start the next scroll request, but let's just intercept that so we don't
     * have to deal with it. We just wait for it to happen.
     */
    CountDownLatch successLatch = new CountDownLatch(1);
    DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff() {

        @Override
        void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) {
            successLatch.countDown();
        }
    };
    BulkRequest request = new BulkRequest();
    for (int i = 0; i < size + 1; i++) {
        request.add(new IndexRequest("index", "type", "id" + i));
    }
    action.sendBulkRequest(timeValueNanos(System.nanoTime()), request);
    if (failWithRejection) {
        BulkByScrollResponse response = listener.get();
        assertThat(response.getBulkFailures(), hasSize(1));
        assertEquals(response.getBulkFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS);
        assertThat(response.getSearchFailures(), empty());
        assertNull(response.getReasonCancelled());
    } else {
        assertTrue(successLatch.await(10, TimeUnit.SECONDS));
    }
}
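The only non-obvious part is the off-by-one between the number of induced rejections and maxRetries. A hedged illustration of that arithmetic, with made-up numbers:

public class RetryArithmeticSketch {

    public static void main(String[] args) {
        int totalFailures = 4;            // rejections the stubbed client will return
        boolean failWithRejection = true; // the test's toggle
        int maxRetries = totalFailures - (failWithRejection ? 1 : 0);
        // With maxRetries one below the rejection count, the final rejection is not retried
        // and surfaces as a TOO_MANY_REQUESTS bulk failure; with maxRetries == totalFailures
        // every rejection is retried away and the action moves on to the next scroll.
        System.out.println("maxRetries = " + maxRetries);
    }
}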
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
the class AsyncBulkByScrollActionTests method testCancelWhileDelayedAfterScrollResponse.
/**
 * Tests that we can cancel the request during its throttling delay. This can't use {@link #cancelTaskCase(Consumer)} because it needs
 * to send the request un-canceled and cancel it at a specific time.
 */
public void testCancelWhileDelayedAfterScrollResponse() throws Exception {
    String reason = randomSimpleString(random());
    /*
     * Replace the thread pool with one that will cancel the task as soon as anything is scheduled, which reindex tries to do when
     * there is a delay.
     */
    setupClient(new TestThreadPool(getTestName()) {

        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            /*
             * This is called twice:
             * 1. To schedule the throttling. When that happens we immediately cancel the task.
             * 2. After the task is canceled.
             * Both times we delegate to the standard behavior so the task is scheduled as expected, can be cancelled, and all
             * that good stuff.
             */
            if (delay.nanos() > 0) {
                generic().execute(() -> taskManager.cancel(testTask, reason, () -> {
                }));
            }
            return super.schedule(delay, name, command);
        }
    });
    // Send the scroll response which will trigger the custom thread pool above, canceling the request before running the response
    DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction();
    boolean previousScrollSet = usually();
    if (previousScrollSet) {
        action.setScroll(scrollId());
    }
    long total = randomIntBetween(0, Integer.MAX_VALUE);
    ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), total, emptyList(), null);
    // Use a long delay here so the test will time out if the cancellation doesn't reschedule the throttled task
    testTask.rethrottle(1);
    simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1000, response);
    // Now that we've got our cancel we'll just verify that it all came through all right
    assertEquals(reason, listener.get(10, TimeUnit.SECONDS).getReasonCancelled());
    if (previousScrollSet) {
        // Canceled tasks always start to clear the scroll before they die.
        assertThat(client.scrollsCleared, contains(scrollId));
    }
}
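The rethrottle(1) plus a simulated batch of 1000 hits is what makes the delay "long": the throttle delay grows with batch size divided by requests per second. A rough, hedged sketch of that arithmetic (the exact formula lives in the task implementation and is only approximated here):

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class ThrottleDelaySketch {

    public static void main(String[] args) {
        float requestsPerSecond = 1f; // testTask.rethrottle(1)
        int lastBatchSize = 1000;     // the simulated scroll response size
        // Approximation: delay ~= lastBatchSize / requestsPerSecond seconds.
        TimeValue delay = TimeValue.timeValueNanos((long) (lastBatchSize / (double) requestsPerSecond * 1_000_000_000L));
        System.out.println("throttle delay ~= " + delay); // roughly 16 minutes, far past the 10 s listener timeout
    }
}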
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
the class WorkingBulkByScrollTaskTests method testDelayAndRethrottle.
/**
 * Furiously rethrottles a delayed request to make sure that we never run it twice.
 */
public void testDelayAndRethrottle() throws IOException, InterruptedException {
    List<Throwable> errors = new CopyOnWriteArrayList<>();
    AtomicBoolean done = new AtomicBoolean();
    int threads = between(1, 10);
    CyclicBarrier waitForShutdown = new CyclicBarrier(threads);
    /*
     * We never end up waiting this long because the test rethrottles over and over again, ratcheting down the delay a random
     * amount each time.
     */
    float originalRequestsPerSecond = (float) randomDoubleBetween(1, 10000, true);
    task.rethrottle(originalRequestsPerSecond);
    TimeValue maxDelay = timeValueSeconds(between(1, 5));
    assertThat(maxDelay.nanos(), greaterThanOrEqualTo(0L));
    int batchSizeForMaxDelay = (int) (maxDelay.seconds() * originalRequestsPerSecond);
    ThreadPool threadPool = new TestThreadPool(getTestName()) {

        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos())));
            return super.schedule(delay, name, command);
        }
    };
    try {
        task.delayPrepareBulkRequest(threadPool, timeValueNanos(System.nanoTime()), batchSizeForMaxDelay, new AbstractRunnable() {

            @Override
            protected void doRun() throws Exception {
                boolean oldValue = done.getAndSet(true);
                if (oldValue) {
                    throw new RuntimeException("Ran twice oh no!");
                }
            }

            @Override
            public void onFailure(Exception e) {
                errors.add(e);
            }
        });
        // Rethrottle on a random number of threads, one of which is this thread.
        Runnable test = () -> {
            try {
                int rethrottles = 0;
                while (false == done.get()) {
                    float requestsPerSecond = (float) randomDoubleBetween(0, originalRequestsPerSecond * 2, true);
                    task.rethrottle(requestsPerSecond);
                    rethrottles += 1;
                }
                logger.info("Rethrottled [{}] times", rethrottles);
                waitForShutdown.await();
            } catch (Exception e) {
                errors.add(e);
            }
        };
        for (int i = 1; i < threads; i++) {
            threadPool.generic().execute(test);
        }
        test.run();
    } finally {
        // Other threads should finish up quickly as they are checking the same AtomicBoolean.
        threadPool.shutdown();
        threadPool.awaitTermination(10, TimeUnit.SECONDS);
    }
    assertThat(errors, empty());
}
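The schedule() assertion above holds because batchSizeForMaxDelay is derived from maxDelay: at the original throttle, the perfectly throttled delay for that batch size cannot exceed maxDelay, and rethrottling only ever shortens it. A hedged, self-contained illustration of that bound, with the random inputs replaced by fixed stand-in values:

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class MaxDelaySketch {

    public static void main(String[] args) {
        float originalRequestsPerSecond = 100f;             // stands in for the random value above
        TimeValue maxDelay = TimeValue.timeValueSeconds(3); // stands in for timeValueSeconds(between(1, 5))
        int batchSizeForMaxDelay = (int) (maxDelay.seconds() * originalRequestsPerSecond);
        // Perfectly throttled delay ~= batchSize / requestsPerSecond seconds, so it is capped by maxDelay.
        double delaySeconds = batchSizeForMaxDelay / (double) originalRequestsPerSecond;
        System.out.println("initial delay ~= " + delaySeconds + " s, cap " + maxDelay);
        assert delaySeconds <= maxDelay.seconds();
    }
}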
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
the class BulkByScrollTaskTests method testMergeStatuses.
public void testMergeStatuses() {
    BulkByScrollTask.StatusOrException[] statuses = new BulkByScrollTask.StatusOrException[between(2, 100)];
    boolean containsNullStatuses = randomBoolean();
    int mergedTotal = 0;
    int mergedUpdated = 0;
    int mergedCreated = 0;
    int mergedDeleted = 0;
    int mergedBatches = 0;
    int mergedVersionConflicts = 0;
    int mergedNoops = 0;
    int mergedBulkRetries = 0;
    int mergedSearchRetries = 0;
    TimeValue mergedThrottled = timeValueNanos(0);
    float mergedRequestsPerSecond = 0;
    TimeValue mergedThrottledUntil = timeValueNanos(Integer.MAX_VALUE);
    for (int i = 0; i < statuses.length; i++) {
        if (containsNullStatuses && rarely()) {
            continue;
        }
        int total = between(0, 10000);
        int updated = between(0, total);
        int created = between(0, total - updated);
        int deleted = between(0, total - updated - created);
        int batches = between(0, 10);
        int versionConflicts = between(0, 100);
        int noops = total - updated - created - deleted;
        int bulkRetries = between(0, 100);
        int searchRetries = between(0, 100);
        TimeValue throttled = timeValueNanos(between(0, 10000));
        float requestsPerSecond = randomValueOtherThanMany(r -> r <= 0, () -> randomFloat());
        String reasonCancelled = randomBoolean() ? null : "test";
        TimeValue throttledUntil = timeValueNanos(between(0, 1000));
        statuses[i] = new BulkByScrollTask.StatusOrException(new BulkByScrollTask.Status(i, total, updated, created, deleted, batches,
                versionConflicts, noops, bulkRetries, searchRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil));
        mergedTotal += total;
        mergedUpdated += updated;
        mergedCreated += created;
        mergedDeleted += deleted;
        mergedBatches += batches;
        mergedVersionConflicts += versionConflicts;
        mergedNoops += noops;
        mergedBulkRetries += bulkRetries;
        mergedSearchRetries += searchRetries;
        mergedThrottled = timeValueNanos(mergedThrottled.nanos() + throttled.nanos());
        mergedRequestsPerSecond += requestsPerSecond;
        mergedThrottledUntil = timeValueNanos(min(mergedThrottledUntil.nanos(), throttledUntil.nanos()));
    }
    String reasonCancelled = randomBoolean() ? randomAsciiOfLength(10) : null;
    BulkByScrollTask.Status merged = new BulkByScrollTask.Status(Arrays.asList(statuses), reasonCancelled);
    assertEquals(mergedTotal, merged.getTotal());
    assertEquals(mergedUpdated, merged.getUpdated());
    assertEquals(mergedCreated, merged.getCreated());
    assertEquals(mergedDeleted, merged.getDeleted());
    assertEquals(mergedBatches, merged.getBatches());
    assertEquals(mergedVersionConflicts, merged.getVersionConflicts());
    assertEquals(mergedNoops, merged.getNoops());
    assertEquals(mergedBulkRetries, merged.getBulkRetries());
    assertEquals(mergedSearchRetries, merged.getSearchRetries());
    assertEquals(mergedThrottled, merged.getThrottled());
    assertEquals(mergedRequestsPerSecond, merged.getRequestsPerSecond(), 0.0001f);
    assertEquals(mergedThrottledUntil, merged.getThrottledUntil());
    assertEquals(reasonCancelled, merged.getReasonCancelled());
}
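For the two TimeValue fields, the merge rules verified above are: throttled times are summed across slice statuses, while throttledUntil takes the minimum, i.e. the slice that will resume soonest. A small hedged sketch of just those two reductions, using arbitrary example values:

import java.util.Arrays;
import java.util.List;
import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class MergeTimeValuesSketch {

    public static void main(String[] args) {
        List<TimeValue> throttled = Arrays.asList(TimeValue.timeValueNanos(1_000), TimeValue.timeValueNanos(2_500));
        List<TimeValue> throttledUntil = Arrays.asList(TimeValue.timeValueNanos(4_000), TimeValue.timeValueNanos(1_200));

        long throttledSum = throttled.stream().mapToLong(TimeValue::nanos).sum();
        long untilMin = throttledUntil.stream().mapToLong(TimeValue::nanos).min().orElse(0L);

        System.out.println("merged throttled      = " + TimeValue.timeValueNanos(throttledSum)); // sum of per-slice throttling
        System.out.println("merged throttledUntil = " + TimeValue.timeValueNanos(untilMin));     // earliest resume time wins
    }
}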