Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
Class PublishClusterStateAction, method innerPublish:
private void innerPublish(final ClusterChangedEvent clusterChangedEvent, final Set<DiscoveryNode> nodesToPublishTo,
                          final SendingController sendingController, final boolean sendFullVersion,
                          final Map<Version, BytesReference> serializedStates, final Map<Version, BytesReference> serializedDiffs) {
    final ClusterState clusterState = clusterChangedEvent.state();
    final ClusterState previousState = clusterChangedEvent.previousState();
    final TimeValue publishTimeout = discoverySettings.getPublishTimeout();
    final long publishingStartInNanos = System.nanoTime();
    for (final DiscoveryNode node : nodesToPublishTo) {
        // we don't send full version if node didn't exist in the previous version of cluster state
        if (sendFullVersion || !previousState.nodes().nodeExists(node)) {
            sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController);
        } else {
            sendClusterStateDiff(clusterState, serializedDiffs, serializedStates, node, publishTimeout, sendingController);
        }
    }
    sendingController.waitForCommit(discoverySettings.getCommitTimeout());
    try {
        long timeLeftInNanos = Math.max(0, publishTimeout.nanos() - (System.nanoTime() - publishingStartInNanos));
        final BlockingClusterStatePublishResponseHandler publishResponseHandler = sendingController.getPublishResponseHandler();
        sendingController.setPublishingTimedOut(!publishResponseHandler.awaitAllNodes(TimeValue.timeValueNanos(timeLeftInNanos)));
        if (sendingController.getPublishingTimedOut()) {
            DiscoveryNode[] pendingNodes = publishResponseHandler.pendingNodes();
            // everyone may have just responded
            if (pendingNodes.length > 0) {
                logger.warn("timed out waiting for all nodes to process published state [{}] (timeout [{}], pending nodes: {})",
                    clusterState.version(), publishTimeout, pendingNodes);
            }
        }
    } catch (InterruptedException e) {
        // ignore & restore interrupt
        Thread.currentThread().interrupt();
    }
}
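The idiom worth noting is how the remaining publish budget is computed: the elapsed nanoseconds are subtracted from publishTimeout.nanos(), clamped at zero, and wrapped back into a TimeValue for the blocking wait. Below is a minimal standalone sketch of that idiom, assuming only that the shaded TimeValue class is on the classpath; the RemainingBudget class name and the simulated work are illustrative, not part of the Elasticsearch code.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class RemainingBudget {

    // Returns how much of the original budget is left since startNanos, never negative.
    static TimeValue remaining(TimeValue budget, long startNanos) {
        long leftNanos = Math.max(0, budget.nanos() - (System.nanoTime() - startNanos));
        return TimeValue.timeValueNanos(leftNanos);
    }

    public static void main(String[] args) throws InterruptedException {
        TimeValue budget = TimeValue.timeValueSeconds(30);
        long start = System.nanoTime();
        Thread.sleep(50); // simulated work that eats into the budget
        System.out.println("time left: " + remaining(budget, start));
    }
}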
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
Class IndexWarmer, method warm:
void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings) {
    if (shard.state() == IndexShardState.CLOSED) {
        return;
    }
    if (settings.isWarmerEnabled() == false) {
        return;
    }
    if (logger.isTraceEnabled()) {
        logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
    }
    shard.warmerService().onPreWarm();
    long time = System.nanoTime();
    final List<TerminationHandle> terminationHandles = new ArrayList<>();
    // get a handle on pending tasks
    for (final Listener listener : listeners) {
        terminationHandles.add(listener.warmReader(shard, searcher));
    }
    // wait for termination
    for (TerminationHandle terminationHandle : terminationHandles) {
        try {
            terminationHandle.awaitTermination();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.warn("top warming has been interrupted", e);
            break;
        }
    }
    long took = System.nanoTime() - time;
    shard.warmerService().onPostWarm(took);
    if (shard.warmerService().logger().isTraceEnabled()) {
        shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
    }
}
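Here the warmer measures elapsed time in raw System.nanoTime() and only converts to a TimeValue at the point where the duration is logged, so the hot path pays nothing for formatting. A minimal sketch of that timing idiom follows, again assuming the shaded TimeValue class is available; StopwatchDemo and the dummy workload are illustrative.

import java.util.concurrent.TimeUnit;
import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class StopwatchDemo {
    public static void main(String[] args) {
        long start = System.nanoTime();
        // dummy workload standing in for warming a searcher
        long sum = 0;
        for (int i = 0; i < 10_000_000; i++) {
            sum += i;
        }
        long took = System.nanoTime() - start;
        // same conversion the warmer uses for its trace message
        System.out.println("warming took [" + new TimeValue(took, TimeUnit.NANOSECONDS) + "] (sum=" + sum + ")");
    }
}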
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
Class AsyncBulkByScrollActionTests, method testScrollDelay:
public void testScrollDelay() throws Exception {
    /*
     * Replace the thread pool with one that will save the delay sent for the command. We'll use that to check that we used a proper
     * delay for throttling.
     */
    AtomicReference<TimeValue> capturedDelay = new AtomicReference<>();
    AtomicReference<Runnable> capturedCommand = new AtomicReference<>();
    setupClient(new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            capturedDelay.set(delay);
            capturedCommand.set(command);
            return null;
        }
    });
    DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction();
    action.setScroll(scrollId());
    // Set the base for the scroll to wait - this is added to the figure we calculate below
    firstSearchRequest.scroll(timeValueSeconds(10));
    // Set throttle to 1 request per second to make the math simpler
    testTask.rethrottle(1f);
    // Make the last batch look nearly instant but have 100 documents
    action.startNextScroll(timeValueNanos(System.nanoTime()), 100);
    // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish)
    assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L)));
    // Now we can simulate a response and check the delay that we used for the task
    SearchHit hit = new SearchHit(0, "id", new Text("type"), emptyMap());
    SearchHits hits = new SearchHits(new SearchHit[] { hit }, 0, 0);
    InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1);
    SearchResponse searchResponse = new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), null);
    if (randomBoolean()) {
        client.lastScroll.get().listener.onResponse(searchResponse);
        // The delay is still 100ish seconds because there hasn't been much time between when we requested the bulk and when we got it
        assertThat(capturedDelay.get().seconds(), either(equalTo(100L)).or(equalTo(99L)));
    } else {
        // Let's rethrottle between starting the scroll and getting the response
        testTask.rethrottle(10f);
        client.lastScroll.get().listener.onResponse(searchResponse);
        // The delay uses the new throttle
        assertThat(capturedDelay.get().seconds(), either(equalTo(10L)).or(equalTo(9L)));
    }
    // Running the command ought to increment the delay counter on the task
    capturedCommand.get().run();
    assertEquals(capturedDelay.get(), testTask.getStatus().getThrottled());
}
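The arithmetic the comments above walk through: with a throttle of 1 request per second and a last batch of 100 documents, the next scroll has to wait roughly 100 seconds, and the keep-alive sent with that scroll is the delay plus the 10 second base, hence the 110-ish assertion. The following is a simplified model of that calculation, assuming nothing about the production code beyond what the test asserts; the ThrottleMath class and its method names are illustrative.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class ThrottleMath {

    // Time a perfectly throttled batch of batchSize requests would take at requestsPerSecond.
    static TimeValue perfectlyThrottledBatchTime(int batchSize, float requestsPerSecond) {
        double seconds = batchSize / (double) requestsPerSecond;
        return TimeValue.timeValueNanos((long) (seconds * 1_000_000_000L));
    }

    public static void main(String[] args) {
        TimeValue base = TimeValue.timeValueSeconds(10);        // scroll keep-alive base used by the test
        TimeValue delay = perfectlyThrottledBatchTime(100, 1f); // 100 docs at 1 req/s -> 100 seconds
        TimeValue keepAlive = TimeValue.timeValueNanos(base.nanos() + delay.nanos());
        System.out.println("delay=" + delay.seconds() + "s keepAlive=" + keepAlive.seconds() + "s"); // 100s and 110s
    }
}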
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
Class AsyncBulkByScrollActionTests, method testThreadPoolRejectionsAbortRequest:
/**
 * Mimicks a ThreadPool rejecting execution of the task.
 */
public void testThreadPoolRejectionsAbortRequest() throws Exception {
    testTask.rethrottle(1);
    setupClient(new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            // While we're here we can check that the sleep made it through
            assertThat(delay.nanos(), greaterThan(0L));
            assertThat(delay.seconds(), lessThanOrEqualTo(10L));
            ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
            return null;
        }
    });
    ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null);
    simulateScrollResponse(new DummyAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response);
    ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
    assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
    assertThat(client.scrollsCleared, contains(scrollId));
    // When the task is rejected we don't increment the throttled timer
    assertEquals(timeValueMillis(0), testTask.getStatus().getThrottled());
}
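The schedule override only sanity-checks the delay it receives: strictly positive and no more than the 10 second scroll keep-alive. A minimal sketch of keeping a computed delay inside such a bound, using nothing beyond the shaded TimeValue class; the ClampDelay class is an illustrative helper, not part of the test or the production code.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class ClampDelay {

    // Clamp requested into the closed interval [0, max], comparing on nanoseconds.
    static TimeValue clamp(TimeValue requested, TimeValue max) {
        long nanos = Math.min(Math.max(0, requested.nanos()), max.nanos());
        return TimeValue.timeValueNanos(nanos);
    }

    public static void main(String[] args) {
        TimeValue max = TimeValue.timeValueSeconds(10);
        System.out.println(clamp(TimeValue.timeValueSeconds(25), max)); // 10s
        System.out.println(clamp(TimeValue.timeValueMillis(500), max)); // 500ms
    }
}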
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
Class BackoffPolicyTests, method testWrapBackoffPolicy:
public void testWrapBackoffPolicy() {
    TimeValue timeValue = timeValueMillis(between(0, Integer.MAX_VALUE));
    int maxNumberOfRetries = between(1, 1000);
    BackoffPolicy policy = BackoffPolicy.constantBackoff(timeValue, maxNumberOfRetries);
    AtomicInteger retries = new AtomicInteger();
    policy = BackoffPolicy.wrap(policy, retries::getAndIncrement);
    int expectedRetries = 0;
    {
        // Fetching the iterator doesn't call the callback
        Iterator<TimeValue> itr = policy.iterator();
        assertEquals(expectedRetries, retries.get());
        while (itr.hasNext()) {
            // hasNext doesn't trigger the callback
            assertEquals(expectedRetries, retries.get());
            // next does
            itr.next();
            expectedRetries += 1;
            assertEquals(expectedRetries, retries.get());
        }
        // next doesn't call the callback when there isn't a backoff available
        expectThrows(NoSuchElementException.class, () -> itr.next());
        assertEquals(expectedRetries, retries.get());
    }
    {
        // The second iterator also calls the callback
        Iterator<TimeValue> itr = policy.iterator();
        itr.next();
        expectedRetries += 1;
        assertEquals(expectedRetries, retries.get());
    }
}
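The test pins down when the wrapping callback fires: only on Iterator.next(), once per consumed backoff delay. A small usage sketch of the same pattern is below, counting retries while actually sleeping for each TimeValue the policy hands out. It assumes BackoffPolicy is reachable under the same shaded prefix (org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BackoffPolicy); adjust the import to wherever the class lives in your build, and note that CountedBackoff is an illustrative name.

import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
import org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BackoffPolicy;
import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class CountedBackoff {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger retries = new AtomicInteger();
        // Two retries, 50 ms apart; the wrapper's callback fires once per consumed delay.
        BackoffPolicy policy = BackoffPolicy.wrap(
            BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(50), 2),
            retries::getAndIncrement);
        Iterator<TimeValue> backoff = policy.iterator();
        while (backoff.hasNext()) {
            TimeValue delay = backoff.next(); // this is the call that bumps the counter
            Thread.sleep(delay.millis());
            System.out.println("retried after " + delay + ", retries so far: " + retries.get());
        }
        System.out.println("total retries: " + retries.get()); // 2
    }
}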