Use of com.metamx.http.client.Request in project druid by druid-io.
The class KafkaIndexTaskClient, method submitRequest.
private FullResponseHolder submitRequest(String id, HttpMethod method, String pathSuffix, String query, byte[] content, boolean retry) {
  final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
  while (true) {
    FullResponseHolder response = null;
    Request request = null;
    TaskLocation location = TaskLocation.unknown();
    String path = String.format("%s/%s/%s", BASE_PATH, id, pathSuffix);
    Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(id);
    if (!status.isPresent() || !status.get().isRunnable()) {
      throw new TaskNotRunnableException(String.format("Aborting request because task [%s] is not runnable", id));
    }
    try {
      location = taskInfoProvider.getTaskLocation(id);
      if (location.equals(TaskLocation.unknown())) {
        throw new NoTaskLocationException(String.format("No TaskLocation available for task [%s]", id));
      }
      // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
      // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
      checkConnection(location.getHost(), location.getPort());
      try {
        URI serviceUri = new URI("http", null, location.getHost(), location.getPort(), path, query, null);
        request = new Request(method, serviceUri.toURL());
        // used to validate that we are talking to the correct worker
        request.addHeader(ChatHandlerResource.TASK_ID_HEADER, id);
        if (content.length > 0) {
          request.setContent(MediaType.APPLICATION_JSON, content);
        }
        log.debug("HTTP %s: %s", method.getName(), serviceUri.toString());
        response = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), httpTimeout).get();
      } catch (Exception e) {
        Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
        Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class);
        throw Throwables.propagate(e);
      }
      int responseCode = response.getStatus().getCode();
      if (responseCode / 100 == 2) {
        return response;
      } else if (responseCode == 400) {
        // don't bother retrying if it's a bad request
        throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
      } else {
        throw new IOException(String.format("Received status [%d]", responseCode));
      }
    } catch (IOException | ChannelException e) {
      // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
      // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
      // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
      // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
      // we will wait for a short period then retry the request indefinitely, expecting the task's location to
      // eventually be updated.
      final Duration delay;
      if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
        String headerId = response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER);
        if (headerId != null && !headerId.equals(id)) {
          log.warn(
              "Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s",
              id,
              headerId,
              TASK_MISMATCH_RETRY_DELAY_SECONDS
          );
          delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
        } else {
          delay = retryPolicy.getAndIncrementRetryDelay();
        }
      } else {
        delay = retryPolicy.getAndIncrementRetryDelay();
      }
      String urlForLog = (request != null
                          ? request.getUrl().toString()
                          : String.format("http://%s:%d%s", location.getHost(), location.getPort(), path));
      if (!retry) {
        // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
        // for informational purposes only) so don't log a scary stack trace
        log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
        Throwables.propagate(e);
      } else if (delay == null) {
        log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
        Throwables.propagate(e);
      } else {
        try {
          final long sleepTime = delay.getMillis();
          log.debug(
              "Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])",
              (response != null ? response.getStatus().getCode() : "no response"),
              urlForLog,
              new Duration(sleepTime).toString(),
              (response != null ? response.getContent() : e.getMessage())
          );
          Thread.sleep(sleepTime);
        } catch (InterruptedException e2) {
          Throwables.propagate(e2);
        }
      }
    } catch (NoTaskLocationException e) {
      log.info("No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or "
               + "may have already completed", id);
      throw e;
    } catch (Exception e) {
      log.warn(e, "Exception while sending request");
      throw e;
    }
  }
}
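Distilled from the method above, here is a minimal sketch of the core Request/HttpClient interaction. It is not taken from the Druid source: the helper name postToTask and its parameters are hypothetical, and it assumes a com.metamx.http.client.HttpClient instance and a Joda-Time Duration timeout, using only the calls already shown above.

// Hypothetical helper: submit one JSON payload to a task's chat handler and return the full response.
private FullResponseHolder postToTask(
    HttpClient httpClient,
    String taskId,
    String host,
    int port,
    String path,
    byte[] jsonBytes,
    Duration httpTimeout
) throws Exception {
  URI serviceUri = new URI("http", null, host, port, path, null, null);
  Request request = new Request(HttpMethod.POST, serviceUri.toURL());
  // The worker uses this header to verify it is actually running the task we think it is.
  request.addHeader(ChatHandlerResource.TASK_ID_HEADER, taskId);
  if (jsonBytes.length > 0) {
    request.setContent(MediaType.APPLICATION_JSON, jsonBytes);
  }
  FullResponseHolder holder = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), httpTimeout).get();
  if (holder.getStatus().getCode() / 100 != 2) {
    throw new IOException(String.format("Received status [%d]", holder.getStatus().getCode()));
  }
  return holder;
}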
Use of com.metamx.http.client.Request in project druid by druid-io.
The class KafkaIndexTaskClientTest, method testGetStatusAsync.
@Test
public void testGetStatusAsync() throws Exception {
  final int numRequests = TEST_IDS.size();
  Capture<Request> captured = Capture.newInstance(CaptureType.ALL);
  expect(responseHolder.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
  expect(responseHolder.getContent()).andReturn("\"READING\"").anyTimes();
  expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT)))
      .andReturn(Futures.immediateFuture(responseHolder))
      .times(numRequests);
  replayAll();
  List<URL> expectedUrls = Lists.newArrayList();
  List<ListenableFuture<KafkaIndexTask.Status>> futures = Lists.newArrayList();
  for (int i = 0; i < numRequests; i++) {
    expectedUrls.add(new URL(String.format(URL_FORMATTER, TEST_HOST, TEST_PORT, TEST_IDS.get(i), "status")));
    futures.add(client.getStatusAsync(TEST_IDS.get(i)));
  }
  List<KafkaIndexTask.Status> responses = Futures.allAsList(futures).get();
  verifyAll();
  List<Request> requests = captured.getValues();
  Assert.assertEquals(numRequests, requests.size());
  Assert.assertEquals(numRequests, responses.size());
  for (int i = 0; i < numRequests; i++) {
    Assert.assertEquals(HttpMethod.GET, requests.get(i).getMethod());
    Assert.assertTrue("unexpectedURL", expectedUrls.contains(requests.get(i).getUrl()));
    Assert.assertEquals(KafkaIndexTask.Status.READING, responses.get(i));
  }
}
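The same EasyMock capture technique can also be used to assert on request headers. A hypothetical single-request variant, reusing the mocks and constants above and assuming Request.getHeaders() exposes the headers as a Multimap, might look like:

@Test
public void testTaskIdHeaderIsSet() throws Exception {
  // Hypothetical test, not from the Druid source: capture one request and check the task-id header.
  Capture<Request> captured = Capture.newInstance();
  expect(responseHolder.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
  expect(responseHolder.getContent()).andReturn("\"READING\"").anyTimes();
  expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT)))
      .andReturn(Futures.immediateFuture(responseHolder));
  replayAll();

  client.getStatusAsync(TEST_IDS.get(0)).get();
  verifyAll();

  // The client tags every request with its task id so a worker can reject calls meant for a task it no longer runs.
  // Assumes getHeaders() returns a Multimap<String, String>.
  Assert.assertEquals(
      TEST_IDS.get(0),
      captured.getValue().getHeaders().get(ChatHandlerResource.TASK_ID_HEADER).iterator().next()
  );
}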
Use of com.metamx.http.client.Request in project druid by druid-io.
The class DirectDruidClient, method run.
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
  QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  boolean isBySegment = BaseQuery.getContextBySegment(query, false);
  Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
  if (types == null) {
    final TypeFactory typeFactory = objectMapper.getTypeFactory();
    JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
    JavaType bySegmentType = typeFactory.constructParametricType(
        Result.class,
        typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType)
    );
    types = Pair.of(baseType, bySegmentType);
    typesMap.put(query.getClass(), types);
  }
  final JavaType typeRef;
  if (isBySegment) {
    typeRef = types.rhs;
  } else {
    typeRef = types.lhs;
  }
  final ListenableFuture<InputStream> future;
  final String url = String.format("http://%s/druid/v2/", host);
  final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
  try {
    log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
    final long requestStartTime = System.currentTimeMillis();
    final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
    builder.setDimension("server", host);
    final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
      private long responseStartTime;
      private final AtomicLong byteCount = new AtomicLong(0);
      private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
      private final AtomicBoolean done = new AtomicBoolean(false);

      @Override
      public ClientResponse<InputStream> handleResponse(HttpResponse response) {
        log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
        responseStartTime = System.currentTimeMillis();
        emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
        try {
          final String responseContext = response.headers().get("X-Druid-Response-Context");
          // context may be null in case of error or query timeout
          if (responseContext != null) {
            context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
            }));
          }
          queue.put(new ChannelBufferInputStream(response.getContent()));
        } catch (final IOException e) {
          log.error(e, "Error parsing response context from url [%s]", url);
          return ClientResponse.<InputStream>finished(new InputStream() {
            @Override
            public int read() throws IOException {
              throw e;
            }
          });
        } catch (InterruptedException e) {
          log.error(e, "Queue appending interrupted");
          Thread.currentThread().interrupt();
          throw Throwables.propagate(e);
        }
        byteCount.addAndGet(response.getContent().readableBytes());
        return ClientResponse.<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {
          @Override
          public boolean hasMoreElements() {
            // Streams remain available until done has been set and the queue has been drained;
            // until then the stream should be spouting good InputStreams.
            synchronized (done) {
              return !done.get() || !queue.isEmpty();
            }
          }

          @Override
          public InputStream nextElement() {
            try {
              return queue.take();
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              throw Throwables.propagate(e);
            }
          }
        }));
      }

      @Override
      public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
        final ChannelBuffer channelBuffer = chunk.getContent();
        final int bytes = channelBuffer.readableBytes();
        if (bytes > 0) {
          try {
            queue.put(new ChannelBufferInputStream(channelBuffer));
          } catch (InterruptedException e) {
            log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
          }
          byteCount.addAndGet(bytes);
        }
        return clientResponse;
      }

      @Override
      public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
        long stopTime = System.currentTimeMillis();
        log.debug(
            "Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
            query.getId(),
            url,
            byteCount.get(),
            stopTime - responseStartTime,
            byteCount.get() / (0.0001 * (stopTime - responseStartTime))
        );
        emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
        emitter.emit(builder.build("query/node/bytes", byteCount.get()));
        synchronized (done) {
          try {
            // An empty byte array is put at the end to give SequenceInputStream.close() something to close out
            // after done is set to true, regardless of the rest of the stream's state.
            queue.put(ByteSource.empty().openStream());
          } catch (InterruptedException e) {
            log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
          } catch (IOException e) {
            // This should never happen
            throw Throwables.propagate(e);
          } finally {
            done.set(true);
          }
        }
        return ClientResponse.<InputStream>finished(clientResponse.getObj());
      }

      @Override
      public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
        // Don't wait for lock in case the lock had something to do with the error
        synchronized (done) {
          done.set(true);
          // Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
          // If nothing is waiting on take(), this will be closed out anyways.
          queue.offer(new InputStream() {
            @Override
            public int read() throws IOException {
              throw new IOException(e);
            }
          });
        }
      }
    };

    future = httpClient.go(
        new Request(HttpMethod.POST, new URL(url))
            .setContent(objectMapper.writeValueAsBytes(query))
            .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
        responseHandler
    );
    queryWatcher.registerQuery(query, future);
    openConnections.getAndIncrement();
    Futures.addCallback(future, new FutureCallback<InputStream>() {
      @Override
      public void onSuccess(InputStream result) {
        openConnections.getAndDecrement();
      }

      @Override
      public void onFailure(Throwable t) {
        openConnections.getAndDecrement();
        if (future.isCancelled()) {
          // forward the cancellation to underlying queriable node
          try {
            StatusResponseHolder res = httpClient.go(
                new Request(HttpMethod.DELETE, new URL(cancelUrl))
                    .setContent(objectMapper.writeValueAsBytes(query))
                    .setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
                new StatusResponseHandler(Charsets.UTF_8)
            ).get();
            if (res.getStatus().getCode() >= 500) {
              // The format string has three placeholders; the query id is the missing first argument.
              throw new RE(
                  "Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                  query.getId(),
                  res.getStatus().getCode(),
                  res.getStatus().getReasonPhrase()
              );
            }
          } catch (IOException | ExecutionException | InterruptedException e) {
            Throwables.propagate(e);
          }
        }
      }
    });
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }
  Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
    @Override
    public JsonParserIterator<T> make() {
      return new JsonParserIterator<T>(typeRef, future, url);
    }

    @Override
    public void cleanup(JsonParserIterator<T> iterFromMake) {
      CloseQuietly.close(iterFromMake);
    }
  });
  // avoid the cost of de-serializing and then re-serializing again when adding to cache
  if (!isBySegment) {
    retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
  }
  return retVal;
}
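The streaming trick used by handleResponse, handleChunk and done above (stitching HTTP chunks into a single InputStream) reduces to a small standalone pattern: a SequenceInputStream reads from an Enumeration backed by a BlockingQueue, and a done flag plus a final empty stream let a blocked reader finish. A minimal sketch with illustrative names, using only JDK classes rather than Druid code:

// Standalone sketch of the queue-backed SequenceInputStream pattern (illustrative names only).
static InputStream stitchChunksDemo() throws InterruptedException {
  final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
  final AtomicBoolean done = new AtomicBoolean(false);

  // Consumer side: one logical stream assembled from whatever the producer enqueues.
  InputStream merged = new SequenceInputStream(new Enumeration<InputStream>() {
    @Override
    public boolean hasMoreElements() {
      // More streams are available until done is set and everything queued has been drained.
      synchronized (done) {
        return !done.get() || !queue.isEmpty();
      }
    }

    @Override
    public InputStream nextElement() {
      try {
        return queue.take();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    }
  });

  // Producer side: each network chunk becomes one InputStream on the queue ...
  queue.put(new ByteArrayInputStream("chunk-1".getBytes(StandardCharsets.UTF_8)));
  queue.put(new ByteArrayInputStream("chunk-2".getBytes(StandardCharsets.UTF_8)));
  // ... and a final empty stream is enqueued before done is set, so a reader blocked in take() can finish.
  synchronized (done) {
    queue.put(new ByteArrayInputStream(new byte[0]));
    done.set(true);
  }
  return merged;
}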
Use of com.metamx.http.client.Request in project druid by druid-io.
The class LookupCoordinatorManager, method updateAllOnHost.
void updateAllOnHost(final URL url, Map<String, Map<String, Object>> knownLookups) throws IOException, InterruptedException, ExecutionException {
  final AtomicInteger returnCode = new AtomicInteger(0);
  final AtomicReference<String> reasonString = new AtomicReference<>(null);
  final byte[] bytes;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Loading up %d lookups to %s", knownLookups.size(), url);
    }
    bytes = smileMapper.writeValueAsBytes(knownLookups);
  } catch (JsonProcessingException e) {
    throw Throwables.propagate(e);
  }
  try (final InputStream result = httpClient.go(
      new Request(HttpMethod.POST, url)
          .addHeader(HttpHeaders.Names.ACCEPT, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
          .addHeader(HttpHeaders.Names.CONTENT_TYPE, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
          .setContent(bytes),
      makeResponseHandler(returnCode, reasonString),
      lookupCoordinatorManagerConfig.getHostUpdateTimeout()
  ).get()) {
    if (!httpStatusIsSuccess(returnCode.get())) {
      final ByteArrayOutputStream baos = new ByteArrayOutputStream();
      try {
        StreamUtils.copyAndClose(result, baos);
      } catch (IOException e2) {
        LOG.warn(e2, "Error reading response");
      }
      throw new IOException(String.format("Bad update request to [%s] : [%d] : [%s] Response: [%s]", url, returnCode.get(), reasonString.get(), StringUtils.fromUtf8(baos.toByteArray())));
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Update on [%s], Status: %s reason: [%s]", url, returnCode.get(), reasonString.get());
      }
      final Map<String, Object> resultMap = smileMapper.readValue(result, MAP_STRING_OBJ_TYPE);
      final Object missingValuesObject = resultMap.get(LookupModule.FAILED_UPDATES_KEY);
      if (null == missingValuesObject) {
        throw new IAE("Update result did not have field for [%s]", LookupModule.FAILED_UPDATES_KEY);
      }
      final Map<String, Object> missingValues = smileMapper.convertValue(missingValuesObject, MAP_STRING_OBJ_TYPE);
      if (!missingValues.isEmpty()) {
        throw new IAE("Lookups failed to update: %s", smileMapper.writeValueAsString(missingValues.keySet()));
      } else {
        LOG.debug("Updated all lookups on [%s]", url);
      }
    }
  }
}
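For comparison, a hedged sketch, not part of LookupCoordinatorManager and with a hypothetical helper name and parameters, that sends the same kind of Smile-encoded POST but reads the whole body through a FullResponseHandler instead of a streaming handler:

// Hypothetical helper: POST a Smile-encoded payload and return the JSON response body as a String.
private String postSmile(HttpClient httpClient, URL url, byte[] smileBytes, Duration timeout) throws Exception {
  Request request = new Request(HttpMethod.POST, url)
      .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
      .addHeader(HttpHeaders.Names.CONTENT_TYPE, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
      .setContent(smileBytes);
  FullResponseHolder holder = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), timeout).get();
  if (holder.getStatus().getCode() / 100 != 2) {
    throw new IOException(String.format("Bad update request to [%s] : [%d]", url, holder.getStatus().getCode()));
  }
  return holder.getContent();
}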
Use of com.metamx.http.client.Request in project druid by druid-io.
The class CoordinatorRuleManager, method poll.
@SuppressWarnings("unchecked")
public void poll() {
  try {
    String url = getRuleURL();
    if (url == null) {
      return;
    }
    FullResponseHolder response = httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();
    if (response.getStatus().equals(HttpResponseStatus.FOUND)) {
      url = response.getResponse().headers().get("Location");
      log.info("Redirecting rule request to [%s]", url);
      response = httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();
    }
    ConcurrentHashMap<String, List<Rule>> newRules = new ConcurrentHashMap<>(
        (Map<String, List<Rule>>) jsonMapper.readValue(response.getContent(), new TypeReference<Map<String, List<Rule>>>() {
        })
    );
    log.info("Got [%,d] rules", newRules.size());
    rules.set(newRules);
  } catch (Exception e) {
    log.error(e, "Exception while polling for rules");
  }
}
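The redirect handling above can be pulled out into a small helper. A hedged sketch, with an illustrative method name that is not from the Druid source, issuing a GET and following at most one 302 Found redirect via the Location header, as can happen when the contacted coordinator is not the current leader:

// Hypothetical helper: GET a URL with a FullResponseHandler and follow a single 302 redirect.
private FullResponseHolder getFollowingOneRedirect(HttpClient httpClient, String url) throws Exception {
  FullResponseHolder response =
      httpClient.go(new Request(HttpMethod.GET, new URL(url)), new FullResponseHandler(Charsets.UTF_8)).get();
  if (response.getStatus().equals(HttpResponseStatus.FOUND)) {
    // A 302 points at another node; re-issue the request against the Location header.
    String redirectUrl = response.getResponse().headers().get("Location");
    response = httpClient.go(new Request(HttpMethod.GET, new URL(redirectUrl)), new FullResponseHandler(Charsets.UTF_8)).get();
  }
  return response;
}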