Use of org.apache.druid.java.util.http.client.Request in project druid by druid-io.
Example: method doSegmentManagement in the class HttpLoadQueuePeon.
private void doSegmentManagement() {
if (stopped || !mainLoopInProgress.compareAndSet(false, true)) {
log.trace("[%s]Ignoring tick. Either in-progress already or stopped.", serverId);
return;
}
final int batchSize = config.getHttpLoadQueuePeonBatchSize();
final List<DataSegmentChangeRequest> newRequests = new ArrayList<>(batchSize);
synchronized (lock) {
Iterator<Map.Entry<DataSegment, SegmentHolder>> iter = Iterators.concat(segmentsToDrop.entrySet().iterator(), segmentsToLoad.entrySet().iterator());
while (newRequests.size() < batchSize && iter.hasNext()) {
Map.Entry<DataSegment, SegmentHolder> entry = iter.next();
if (entry.getValue().hasTimedOut()) {
entry.getValue().requestFailed("timed out");
iter.remove();
} else {
newRequests.add(entry.getValue().getChangeRequest());
}
}
}
if (newRequests.size() == 0) {
log.trace("[%s]Found no load/drop requests. SegmentsToLoad[%d], SegmentsToDrop[%d], batchSize[%d].", serverId, segmentsToLoad.size(), segmentsToDrop.size(), config.getHttpLoadQueuePeonBatchSize());
mainLoopInProgress.set(false);
return;
}
try {
log.trace("Sending [%d] load/drop requests to Server[%s].", newRequests.size(), serverId);
BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
ListenableFuture<InputStream> future = httpClient.go(
    new Request(HttpMethod.POST, changeRequestURL)
        .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
        .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
        .setContent(requestBodyWriter.writeValueAsBytes(newRequests)),
    responseHandler,
    new Duration(config.getHttpLoadQueuePeonHostTimeout().getMillis() + 5000)
);
Futures.addCallback(future, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream result) {
boolean scheduleNextRunImmediately = true;
try {
if (responseHandler.getStatus() == HttpServletResponse.SC_NO_CONTENT) {
log.trace("Received NO CONTENT reseponse from [%s]", serverId);
} else if (HttpServletResponse.SC_OK == responseHandler.getStatus()) {
try {
List<SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus> statuses = jsonMapper.readValue(result, RESPONSE_ENTITY_TYPE_REF);
log.trace("Server[%s] returned status response [%s].", serverId, statuses);
synchronized (lock) {
if (stopped) {
log.trace("Ignoring response from Server[%s]. We are already stopped.", serverId);
scheduleNextRunImmediately = false;
return;
}
for (SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus e : statuses) {
switch(e.getStatus().getState()) {
case SUCCESS:
case FAILED:
handleResponseStatus(e.getRequest(), e.getStatus());
break;
case PENDING:
log.trace("Request[%s] is still pending on server[%s].", e.getRequest(), serverId);
break;
default:
scheduleNextRunImmediately = false;
log.error("Server[%s] returned unknown state in status[%s].", serverId, e.getStatus());
}
}
}
} catch (Exception ex) {
scheduleNextRunImmediately = false;
logRequestFailure(ex);
}
} else {
scheduleNextRunImmediately = false;
logRequestFailure(new RE("Unexpected Response Status."));
}
} finally {
mainLoopInProgress.set(false);
if (scheduleNextRunImmediately) {
processingExecutor.execute(HttpLoadQueuePeon.this::doSegmentManagement);
}
}
}
@Override
public void onFailure(Throwable t) {
try {
logRequestFailure(t);
} finally {
mainLoopInProgress.set(false);
}
}
private void logRequestFailure(Throwable t) {
log.error(t, "Request[%s] Failed with status[%s]. Reason[%s].", changeRequestURL, responseHandler.getStatus(), responseHandler.getDescription());
}
}, processingExecutor);
} catch (Throwable th) {
log.error(th, "Error sending load/drop request to [%s].", serverId);
mainLoopInProgress.set(false);
}
}
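The pattern above reduces to: serialize a batch to JSON, build a POST Request with matching Accept/Content-Type headers, and dispatch it through the async HttpClient with an explicit timeout. A minimal sketch of just that request shape, assuming an arbitrary endpoint and payload; the helper name and the InputStreamResponseHandler choice are illustrative (the real code uses a BytesAccumulatingResponseHandler so it can also inspect the status code):

import java.io.InputStream;
import java.net.URL;
import java.util.List;
import javax.ws.rs.core.MediaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.java.util.http.client.Request;
import org.apache.druid.java.util.http.client.response.InputStreamResponseHandler;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.joda.time.Duration;

public class JsonPostSketch {
  // Hypothetical helper: POST a JSON-serialized batch and return the raw response stream.
  public static ListenableFuture<InputStream> postJson(
      HttpClient httpClient,
      ObjectMapper jsonMapper,
      URL endpoint,       // assumed endpoint, e.g. the peon's change-request URL
      List<?> batch,      // assumed payload, e.g. a batch of change requests
      long timeoutMillis
  ) throws Exception {
    Request request = new Request(HttpMethod.POST, endpoint)
        .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
        .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
        .setContent(jsonMapper.writeValueAsBytes(batch));
    return httpClient.go(request, new InputStreamResponseHandler(), new Duration(timeoutMillis));
  }
}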
Use of org.apache.druid.java.util.http.client.Request in project druid by druid-io.
Example: method sync in the class ChangeRequestHttpSyncer.
private void sync() {
if (!startStopLock.awaitStarted(1, TimeUnit.MILLISECONDS)) {
log.info("Skipping sync() call for server[%s].", logIdentity);
return;
}
lastSyncTime = System.currentTimeMillis();
try {
final String req = getRequestString();
BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
log.debug("Sending sync request to server[%s]", logIdentity);
ListenableFuture<InputStream> syncRequestFuture = httpClient.go(
    new Request(HttpMethod.GET, new URL(baseServerURL, req))
        .addHeader(HttpHeaders.Names.ACCEPT, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
        .addHeader(HttpHeaders.Names.CONTENT_TYPE, SmileMediaTypes.APPLICATION_JACKSON_SMILE),
    responseHandler,
    Duration.millis(serverHttpTimeout)
);
log.debug("Sent sync request to [%s]", logIdentity);
Futures.addCallback(syncRequestFuture, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream stream) {
synchronized (startStopLock) {
if (!startStopLock.awaitStarted(1, TimeUnit.MILLISECONDS)) {
log.info("Skipping sync() success for server[%s].", logIdentity);
return;
}
try {
if (responseHandler.getStatus() == HttpServletResponse.SC_NO_CONTENT) {
log.debug("Received NO CONTENT from server[%s]", logIdentity);
lastSuccessfulSyncTime = System.currentTimeMillis();
return;
} else if (responseHandler.getStatus() != HttpServletResponse.SC_OK) {
handleFailure(new RE("Bad Sync Response."));
return;
}
log.debug("Received sync response from [%s]", logIdentity);
ChangeRequestsSnapshot<T> changes = smileMapper.readValue(stream, responseTypeReferences);
log.debug("Finished reading sync response from [%s]", logIdentity);
if (changes.isResetCounter()) {
log.info("[%s] requested resetCounter for reason [%s].", logIdentity, changes.getResetCause());
counter = null;
return;
}
if (counter == null) {
listener.fullSync(changes.getRequests());
} else {
listener.deltaSync(changes.getRequests());
}
counter = changes.getCounter();
if (initializationLatch.getCount() > 0) {
initializationLatch.countDown();
log.info("[%s] synced successfully for the first time.", logIdentity);
}
if (consecutiveFailedAttemptCount > 0) {
consecutiveFailedAttemptCount = 0;
log.info("[%s] synced successfully.", logIdentity);
}
lastSuccessfulSyncTime = System.currentTimeMillis();
} catch (Exception ex) {
String logMsg = StringUtils.nonStrictFormat("Error processing sync response from [%s]. Reason [%s]", logIdentity, ex.getMessage());
if (incrementFailedAttemptAndCheckUnstabilityTimeout()) {
log.error(ex, logMsg);
} else {
log.info("Temporary Failure. %s", logMsg);
log.debug(ex, logMsg);
}
} finally {
addNextSyncToWorkQueue();
}
}
}
@Override
public void onFailure(Throwable t) {
synchronized (startStopLock) {
if (!startStopLock.awaitStarted(1, TimeUnit.MILLISECONDS)) {
log.info("Skipping sync() failure for URL[%s].", logIdentity);
return;
}
try {
handleFailure(t);
} finally {
addNextSyncToWorkQueue();
}
}
}
private void handleFailure(Throwable t) {
String logMsg = StringUtils.nonStrictFormat("failed to get sync response from [%s]. Return code [%s], Reason: [%s]", logIdentity, responseHandler.getStatus(), responseHandler.getDescription());
if (incrementFailedAttemptAndCheckUnstabilityTimeout()) {
log.error(t, logMsg);
} else {
log.info("Temporary Failure. %s", logMsg);
log.debug(t, logMsg);
}
}
}, executor);
} catch (Throwable th) {
try {
String logMsg = StringUtils.nonStrictFormat("Fatal error while fetching segment list from [%s].", logIdentity);
if (incrementFailedAttemptAndCheckUnstabilityTimeout()) {
log.makeAlert(th, logMsg).emit();
} else {
log.info("Temporary Failure. %s", logMsg);
log.debug(th, logMsg);
}
} finally {
addNextSyncToWorkQueue();
}
}
}
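Where the load-queue peon POSTs JSON, the syncer issues a GET negotiated as Smile (binary Jackson). A hedged sketch of that request shape under the same assumptions as before; the helper name and response-handler choice are illustrative:

import java.io.InputStream;
import java.net.URL;
import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.java.util.http.client.Request;
import org.apache.druid.java.util.http.client.response.InputStreamResponseHandler;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.joda.time.Duration;

public class SmileSyncSketch {
  // Hypothetical helper mirroring the sync request above: a GET with Smile content negotiation.
  public static ListenableFuture<InputStream> fetchChanges(
      HttpClient httpClient,
      URL baseServerURL,
      String requestPath,      // e.g. the counter/hash query string built by getRequestString()
      long serverHttpTimeout
  ) throws Exception {
    Request request = new Request(HttpMethod.GET, new URL(baseServerURL, requestPath))
        .addHeader(HttpHeaders.Names.ACCEPT, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
        .addHeader(HttpHeaders.Names.CONTENT_TYPE, SmileMediaTypes.APPLICATION_JACKSON_SMILE);
    return httpClient.go(request, new InputStreamResponseHandler(), Duration.millis(serverHttpTimeout));
  }
}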
Use of org.apache.druid.java.util.http.client.Request in project druid by druid-io.
Example: method run in the class DirectDruidClient.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
final Query<T> query = queryPlus.getQuery();
QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
boolean isBySegment = QueryContexts.isBySegment(query);
final JavaType queryResultType = isBySegment ? toolChest.getBySegmentResultType() : toolChest.getBaseResultType();
final ListenableFuture<InputStream> future;
final String url = scheme + "://" + host + "/druid/v2/";
final String cancelUrl = url + query.getId();
try {
log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
final long requestStartTimeNs = System.nanoTime();
final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
final AtomicLong totalBytesGathered = context.getTotalBytes();
final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
final boolean usingBackpressure = maxQueuedBytes > 0;
final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
private final AtomicLong totalByteCount = new AtomicLong(0);
private final AtomicLong queuedByteCount = new AtomicLong(0);
private final AtomicLong channelSuspendedTime = new AtomicLong(0);
private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();
private final AtomicBoolean done = new AtomicBoolean(false);
private final AtomicReference<String> fail = new AtomicReference<>();
private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();
private QueryMetrics<? super Query<T>> queryMetrics;
private long responseStartTimeNs;
private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
if (queryMetrics == null) {
queryMetrics = toolChest.makeMetrics(query);
queryMetrics.server(host);
}
return queryMetrics;
}
/**
* Queue a buffer. Returns true if we should keep reading, false otherwise.
*/
private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
// Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
// the actual number of queued bytes at any particular time.
final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
queue.put(holder);
// True if we should keep reading.
return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
}
private InputStream dequeue() throws InterruptedException {
final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
if (holder == null) {
throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
}
final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
long backPressureTime = Preconditions.checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?").resume(holder.getChunkNum());
channelSuspendedTime.addAndGet(backPressureTime);
}
return holder.getStream();
}
@Override
public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
trafficCopRef.set(trafficCop);
checkQueryTimeout();
checkTotalBytesLimit(response.getContent().readableBytes());
log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
responseStartTimeNs = System.nanoTime();
acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs).emit(emitter);
final boolean continueReading;
try {
log.trace("Got a response from [%s] for query ID[%s], subquery ID[%s]", url, query.getId(), query.getSubQueryId());
final String responseContext = response.headers().get(QueryResource.HEADER_RESPONSE_CONTEXT);
context.addRemainingResponse(query.getMostSpecificId(), VAL_TO_REDUCE_REMAINING_RESPONSES);
// context may be null in case of error or query timeout
if (responseContext != null) {
context.merge(ResponseContext.deserialize(responseContext, objectMapper));
}
continueReading = enqueue(response.getContent(), 0L);
} catch (final IOException e) {
log.error(e, "Error parsing response context from url [%s]", url);
return ClientResponse.finished(new InputStream() {
@Override
public int read() throws IOException {
throw e;
}
});
} catch (InterruptedException e) {
log.error(e, "Queue appending interrupted");
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
totalByteCount.addAndGet(response.getContent().readableBytes());
return ClientResponse.finished(new SequenceInputStream(new Enumeration<InputStream>() {
@Override
public boolean hasMoreElements() {
if (fail.get() != null) {
throw new RE(fail.get());
}
checkQueryTimeout();
// Then the stream should be spouting good InputStreams.
synchronized (done) {
return !done.get() || !queue.isEmpty();
}
}
@Override
public InputStream nextElement() {
if (fail.get() != null) {
throw new RE(fail.get());
}
try {
return dequeue();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}), continueReading);
}
@Override
public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk, long chunkNum) {
checkQueryTimeout();
final ChannelBuffer channelBuffer = chunk.getContent();
final int bytes = channelBuffer.readableBytes();
checkTotalBytesLimit(bytes);
boolean continueReading = true;
if (bytes > 0) {
try {
continueReading = enqueue(channelBuffer, chunkNum);
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
totalByteCount.addAndGet(bytes);
}
return ClientResponse.finished(clientResponse.getObj(), continueReading);
}
@Override
public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
long stopTimeNs = System.nanoTime();
long nodeTimeNs = stopTimeNs - requestStartTimeNs;
final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, totalByteCount.get(), nodeTimeMs, // Floating math; division by zero will yield Inf, not exception
totalByteCount.get() / (0.001 * nodeTimeMs));
QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
responseMetrics.reportNodeTime(nodeTimeNs);
responseMetrics.reportNodeBytes(totalByteCount.get());
if (usingBackpressure) {
responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
}
responseMetrics.emit(emitter);
synchronized (done) {
try {
// An empty byte array is put at the end to give the SequenceInputStream.close() as something to close out
// after done is set to true, regardless of the rest of the stream's state.
queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} finally {
done.set(true);
}
}
return ClientResponse.finished(clientResponse.getObj());
}
@Override
public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
String msg = StringUtils.format("Query[%s] url[%s] failed with exception msg [%s]", query.getId(), url, e.getMessage());
setupResponseReadFailure(msg, e);
}
private void setupResponseReadFailure(String msg, Throwable th) {
fail.set(msg);
queue.clear();
queue.offer(InputStreamHolder.fromStream(new InputStream() {
@Override
public int read() throws IOException {
if (th != null) {
throw new IOException(msg, th);
} else {
throw new IOException(msg);
}
}
}, -1, 0));
}
// Returns remaining timeout or throws exception if timeout already elapsed.
private long checkQueryTimeout() {
long timeLeft = timeoutAt - System.currentTimeMillis();
if (timeLeft <= 0) {
String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
setupResponseReadFailure(msg, null);
throw new QueryTimeoutException(msg);
} else {
return timeLeft;
}
}
private void checkTotalBytesLimit(long bytes) {
if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
String msg = StringUtils.format("Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url);
setupResponseReadFailure(msg, null);
throw new ResourceLimitExceededException(msg);
}
}
};
long timeLeft = timeoutAt - System.currentTimeMillis();
if (timeLeft <= 0) {
throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
}
future = httpClient.go(
    new Request(HttpMethod.POST, new URL(url))
        .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
        .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
    responseHandler,
    Duration.millis(timeLeft)
);
queryWatcher.registerQueryFuture(query, future);
openConnections.getAndIncrement();
Futures.addCallback(future, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream result) {
openConnections.getAndDecrement();
}
@Override
public void onFailure(Throwable t) {
openConnections.getAndDecrement();
if (future.isCancelled()) {
cancelQuery(query, cancelUrl);
}
}
},
// The callback is non-blocking and quick, so it's OK to schedule it using directExecutor()
Execs.directExecutor());
} catch (IOException e) {
throw new RuntimeException(e);
}
Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
@Override
public JsonParserIterator<T> make() {
return new JsonParserIterator<T>(queryResultType, future, url, query, host, toolChest.decorateObjectMapper(objectMapper, query));
}
@Override
public void cleanup(JsonParserIterator<T> iterFromMake) {
CloseableUtils.closeAndWrapExceptions(iterFromMake);
}
});
// avoid the cost of de-serializing and then re-serializing again when adding to cache
if (!isBySegment) {
retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
}
return retVal;
}
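A detail that is easy to miss in the handler above is the backpressure bookkeeping: queuedByteCount is incremented before a chunk is queued and decremented after it is dequeued, so the counter never under-reports and the producer errs on the side of pausing early; the channel is resumed only once the count drops back under maxQueuedBytes. A self-contained sketch of just that accounting, with a plain Runnable standing in for the TrafficCop resume call (an assumption for illustration):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

public class BackpressureQueueSketch {
  private final BlockingQueue<byte[]> queue = new LinkedBlockingQueue<>();
  private final AtomicLong queuedByteCount = new AtomicLong(0);
  private final long maxQueuedBytes;
  private final Runnable resumeChannel; // stand-in for TrafficCop.resume(chunkNum)

  public BackpressureQueueSketch(long maxQueuedBytes, Runnable resumeChannel) {
    this.maxQueuedBytes = maxQueuedBytes;
    this.resumeChannel = resumeChannel;
  }

  // Returns true if the producer should keep reading from the channel.
  public boolean enqueue(byte[] chunk) throws InterruptedException {
    // Increment before queueing so the count is at least as high as the real queued total.
    long current = queuedByteCount.addAndGet(chunk.length);
    queue.put(chunk);
    return current < maxQueuedBytes;
  }

  public byte[] dequeue() throws InterruptedException {
    byte[] chunk = queue.take();
    long current = queuedByteCount.addAndGet(-chunk.length);
    if (current < maxQueuedBytes) {
      resumeChannel.run(); // un-suspend the channel once we are back under the limit
    }
    return chunk;
  }
}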
Use of org.apache.druid.java.util.http.client.Request in project druid by druid-io.
Example: method testSimple in the class DruidLeaderClientTest.
@Test
public void testSimple() throws Exception {
DruidNodeDiscovery druidNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
EasyMock.expect(druidNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(discoveryDruidNode));
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.PEON)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscovery, druidNodeDiscoveryProvider);
DruidLeaderClient druidLeaderClient = new DruidLeaderClient(httpClient, druidNodeDiscoveryProvider, NodeRole.PEON, "/simple/leader");
druidLeaderClient.start();
Request request = druidLeaderClient.makeRequest(HttpMethod.POST, "/simple/direct");
request.setContent("hello".getBytes(StandardCharsets.UTF_8));
Assert.assertEquals("hello", druidLeaderClient.go(request).getContent());
}
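The test exercises the two-step flow a DruidLeaderClient caller follows: build a Request against the discovered leader with makeRequest, then issue it synchronously with go. The same flow as a hedged helper, using only the methods the test itself demonstrates; the helper name and path are illustrative:

import java.nio.charset.StandardCharsets;
import org.apache.druid.discovery.DruidLeaderClient;
import org.apache.druid.java.util.http.client.Request;
import org.jboss.netty.handler.codec.http.HttpMethod;

public class LeaderCallSketch {
  // Hypothetical helper: POST a small payload to a leader-hosted endpoint and return the body.
  public static String postToLeader(DruidLeaderClient leaderClient, String path, String payload) throws Exception {
    Request request = leaderClient.makeRequest(HttpMethod.POST, path);
    request.setContent(payload.getBytes(StandardCharsets.UTF_8));
    return leaderClient.go(request).getContent();
  }
}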
Use of org.apache.druid.java.util.http.client.Request in project druid by druid-io.
Example: method submitRequest in the class IndexTaskClient.
/**
* Sends an HTTP request to the task of the specified {@code taskId} and returns a response if it succeeded.
*/
protected <IntermediateType, FinalType> FinalType submitRequest(
    String taskId,
    @Nullable String mediaType, // nullable if content is empty
    HttpMethod method,
    String encodedPathSuffix,
    @Nullable String encodedQueryString,
    byte[] content,
    HttpResponseHandler<IntermediateType, FinalType> responseHandler,
    boolean retry
) throws IOException, ChannelException, NoTaskLocationException {
final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
while (true) {
String path = StringUtils.format("%s/%s/%s", BASE_PATH, StringUtils.urlEncode(taskId), encodedPathSuffix);
Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(taskId);
if (!status.isPresent() || !status.get().isRunnable()) {
throw new TaskNotRunnableException(StringUtils.format("Aborting request because task [%s] is not runnable", taskId));
}
final TaskLocation location = taskInfoProvider.getTaskLocation(taskId);
if (location.equals(TaskLocation.unknown())) {
throw new NoTaskLocationException(StringUtils.format("No TaskLocation available for task [%s]", taskId));
}
final Request request = createRequest(taskId, location, path, encodedQueryString, method, mediaType, content);
Either<StringFullResponseHolder, FinalType> response = null;
try {
// Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
// for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
checkConnection(request.getUrl().getHost(), request.getUrl().getPort());
response = submitRequest(request, responseHandler);
if (response.isValue()) {
return response.valueOrThrow();
} else {
final StringBuilder exceptionMessage = new StringBuilder();
final HttpResponseStatus httpResponseStatus = response.error().getStatus();
final String httpResponseContent = response.error().getContent();
exceptionMessage.append("Received server error with status [").append(httpResponseStatus).append("]");
if (!Strings.isNullOrEmpty(httpResponseContent)) {
final String choppedMessage = StringUtils.chop(StringUtils.nullToEmptyNonDruidDataString(httpResponseContent), 1000);
exceptionMessage.append("; first 1KB of body: ").append(choppedMessage);
}
if (httpResponseStatus.getCode() == 400) {
// don't bother retrying if it's a bad request
throw new IAE(exceptionMessage.toString());
} else {
throw new IOE(exceptionMessage.toString());
}
}
} catch (IOException | ChannelException e) {
// Since workers are free to move tasks around to different ports, there is a chance that a task may have been
// moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
// identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
// worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
// we will wait for a short period then retry the request indefinitely, expecting the task's location to
// eventually be updated.
final Duration delay;
if (response != null && !response.isValue() && response.error().getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
String headerId = StringUtils.urlDecode(response.error().getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER));
if (headerId != null && !headerId.equals(taskId)) {
log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s", taskId, headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
} else {
delay = retryPolicy.getAndIncrementRetryDelay();
}
} else {
delay = retryPolicy.getAndIncrementRetryDelay();
}
final String urlForLog = request.getUrl().toString();
if (!retry) {
// if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
// for informational purposes only); log at INFO instead of WARN.
log.noStackTrace().info(e, "submitRequest failed for [%s]", urlForLog);
throw e;
} else if (delay == null) {
// When retrying, log the final failure at WARN level, since it is likely to be bad news.
log.warn(e, "submitRequest failed for [%s]", urlForLog);
throw e;
} else {
try {
final long sleepTime = delay.getMillis();
// When retrying, log non-final failures at INFO level.
log.noStackTrace().info(e, "submitRequest failed for [%s]; will try again in [%s]", urlForLog, new Duration(sleepTime).toString());
Thread.sleep(sleepTime);
} catch (InterruptedException e2) {
Thread.currentThread().interrupt();
e.addSuppressed(e2);
throw new RuntimeException(e);
}
}
} catch (NoTaskLocationException e) {
log.info("No TaskLocation available for task [%s], this task may not have been assigned to a worker yet " + "or may have already completed", taskId);
throw e;
} catch (Exception e) {
log.warn(e, "Exception while sending request");
throw e;
}
}
}
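Stripped of the task-location and 404 special cases, the surrounding retry scaffolding is generic: ask a policy for the next delay, sleep, and retry until the policy signals exhaustion by returning null. A standalone sketch of that loop shape, with a Supplier standing in for RetryPolicy.getAndIncrementRetryDelay (an assumption for illustration):

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.function.Supplier;
import org.joda.time.Duration;

public class RetryLoopSketch {
  // Retries the call on IOException until the delay supplier returns null (retries exhausted).
  public static <T> T withRetries(Callable<T> call, Supplier<Duration> nextDelay) throws Exception {
    while (true) {
      try {
        return call.call();
      } catch (IOException e) {
        Duration delay = nextDelay.get();
        if (delay == null) {
          throw e; // out of retries; surface the last failure
        }
        try {
          Thread.sleep(delay.getMillis());
        } catch (InterruptedException e2) {
          Thread.currentThread().interrupt();
          e.addSuppressed(e2);
          throw new RuntimeException(e);
        }
      }
    }
  }
}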