Use of org.apache.druid.java.util.http.client.response.ClientResponse in project druid by druid-io.
The class DirectDruidClient, method run:
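run() POSTs the serialized query to the data server at /druid/v2/ and exposes the streamed response body as a Sequence<T>. The anonymous HttpResponseHandler below buffers incoming chunks in a BlockingQueue of InputStreamHolders while enforcing the query timeout, the scatter-gather byte limit, and, when maxQueuedBytes is set, backpressure against the Netty channel via the TrafficCop handed to handleResponse.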
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
final Query<T> query = queryPlus.getQuery();
QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
boolean isBySegment = QueryContexts.isBySegment(query);
final JavaType queryResultType = isBySegment ? toolChest.getBySegmentResultType() : toolChest.getBaseResultType();
final ListenableFuture<InputStream> future;
final String url = scheme + "://" + host + "/druid/v2/";
final String cancelUrl = url + query.getId();
try {
log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
final long requestStartTimeNs = System.nanoTime();
final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
final AtomicLong totalBytesGathered = context.getTotalBytes();
final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
final boolean usingBackpressure = maxQueuedBytes > 0;
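// When backpressure is enabled, enqueue() returning false makes NettyHttpClient stop reading from the
// channel; dequeue() later resumes it through the TrafficCop once the queue drains below maxQueuedBytes.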
final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
private final AtomicLong totalByteCount = new AtomicLong(0);
private final AtomicLong queuedByteCount = new AtomicLong(0);
private final AtomicLong channelSuspendedTime = new AtomicLong(0);
private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();
private final AtomicBoolean done = new AtomicBoolean(false);
private final AtomicReference<String> fail = new AtomicReference<>();
private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();
private QueryMetrics<? super Query<T>> queryMetrics;
private long responseStartTimeNs;
private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
if (queryMetrics == null) {
queryMetrics = toolChest.makeMetrics(query);
queryMetrics.server(host);
}
return queryMetrics;
}
/**
* Queue a buffer. Returns true if we should keep reading, false otherwise.
*/
private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
// Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
// the actual number of queued bytes at any particular time.
final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
queue.put(holder);
// True if we should keep reading.
return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
}
private InputStream dequeue() throws InterruptedException {
final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
if (holder == null) {
throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
}
final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
long backPressureTime = Preconditions.checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?").resume(holder.getChunkNum());
channelSuspendedTime.addAndGet(backPressureTime);
}
return holder.getStream();
}
@Override
public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
trafficCopRef.set(trafficCop);
checkQueryTimeout();
checkTotalBytesLimit(response.getContent().readableBytes());
log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
responseStartTimeNs = System.nanoTime();
acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs).emit(emitter);
final boolean continueReading;
try {
log.trace("Got a response from [%s] for query ID[%s], subquery ID[%s]", url, query.getId(), query.getSubQueryId());
final String responseContext = response.headers().get(QueryResource.HEADER_RESPONSE_CONTEXT);
context.addRemainingResponse(query.getMostSpecificId(), VAL_TO_REDUCE_REMAINING_RESPONSES);
// responseContext may be null in case of error or query timeout
if (responseContext != null) {
context.merge(ResponseContext.deserialize(responseContext, objectMapper));
}
continueReading = enqueue(response.getContent(), 0L);
} catch (final IOException e) {
log.error(e, "Error parsing response context from url [%s]", url);
return ClientResponse.finished(new InputStream() {
@Override
public int read() throws IOException {
throw e;
}
});
} catch (InterruptedException e) {
log.error(e, "Queue appending interrupted");
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
totalByteCount.addAndGet(response.getContent().readableBytes());
return ClientResponse.finished(new SequenceInputStream(new Enumeration<InputStream>() {
@Override
public boolean hasMoreElements() {
if (fail.get() != null) {
throw new RE(fail.get());
}
checkQueryTimeout();
// done is not set to true until the final (empty) stream has been queued in done(), so there are
// more elements while we are not yet done or the queue still holds streams.
synchronized (done) {
return !done.get() || !queue.isEmpty();
}
}
@Override
public InputStream nextElement() {
if (fail.get() != null) {
throw new RE(fail.get());
}
try {
return dequeue();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}), continueReading);
}
@Override
public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk, long chunkNum) {
checkQueryTimeout();
final ChannelBuffer channelBuffer = chunk.getContent();
final int bytes = channelBuffer.readableBytes();
checkTotalBytesLimit(bytes);
boolean continueReading = true;
if (bytes > 0) {
try {
continueReading = enqueue(channelBuffer, chunkNum);
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
totalByteCount.addAndGet(bytes);
}
return ClientResponse.finished(clientResponse.getObj(), continueReading);
}
@Override
public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
long stopTimeNs = System.nanoTime();
long nodeTimeNs = stopTimeNs - requestStartTimeNs;
final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, totalByteCount.get(), nodeTimeMs, // Floating math; division by zero will yield Inf, not exception
totalByteCount.get() / (0.001 * nodeTimeMs));
QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
responseMetrics.reportNodeTime(nodeTimeNs);
responseMetrics.reportNodeBytes(totalByteCount.get());
if (usingBackpressure) {
responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
}
responseMetrics.emit(emitter);
synchronized (done) {
try {
// An empty buffer is put at the end to give SequenceInputStream.close() something to close out
// after done is set to true, regardless of the rest of the stream's state.
queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} finally {
done.set(true);
}
}
return ClientResponse.finished(clientResponse.getObj());
}
@Override
public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
String msg = StringUtils.format("Query[%s] url[%s] failed with exception msg [%s]", query.getId(), url, e.getMessage());
setupResponseReadFailure(msg, e);
}
private void setupResponseReadFailure(String msg, Throwable th) {
fail.set(msg);
queue.clear();
queue.offer(InputStreamHolder.fromStream(new InputStream() {
@Override
public int read() throws IOException {
if (th != null) {
throw new IOException(msg, th);
} else {
throw new IOException(msg);
}
}
}, -1, 0));
}
// Returns remaining timeout or throws exception if timeout already elapsed.
private long checkQueryTimeout() {
long timeLeft = timeoutAt - System.currentTimeMillis();
if (timeLeft <= 0) {
String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
setupResponseReadFailure(msg, null);
throw new QueryTimeoutException(msg);
} else {
return timeLeft;
}
}
private void checkTotalBytesLimit(long bytes) {
if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
String msg = StringUtils.format("Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url);
setupResponseReadFailure(msg, null);
throw new ResourceLimitExceededException(msg);
}
}
};
long timeLeft = timeoutAt - System.currentTimeMillis();
if (timeLeft <= 0) {
throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query[%s] url[%s] timed out.", query.getId(), url));
}
future = httpClient.go(new Request(HttpMethod.POST, new URL(url)).setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft))).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), responseHandler, Duration.millis(timeLeft));
queryWatcher.registerQueryFuture(query, future);
openConnections.getAndIncrement();
Futures.addCallback(future, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream result) {
openConnections.getAndDecrement();
}
@Override
public void onFailure(Throwable t) {
openConnections.getAndDecrement();
if (future.isCancelled()) {
cancelQuery(query, cancelUrl);
}
}
},
// The callback is non-blocking and quick, so it's OK to schedule it using directExecutor().
Execs.directExecutor());
} catch (IOException e) {
throw new RuntimeException(e);
}
Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
@Override
public JsonParserIterator<T> make() {
return new JsonParserIterator<T>(queryResultType, future, url, query, host, toolChest.decorateObjectMapper(objectMapper, query));
}
@Override
public void cleanup(JsonParserIterator<T> iterFromMake) {
CloseableUtils.closeAndWrapExceptions(iterFromMake);
}
});
// bySegment results are left serialized, to avoid the cost of deserializing and then re-serializing them when adding to the cache.
if (!isBySegment) {
retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
}
return retVal;
}
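For contrast with the large anonymous handler above, here is a minimal sketch of the same HttpResponseHandler/ClientResponse contract. It is not Druid code (the class name StringAccumulatingResponseHandler is made up), but it uses only the callback signatures visible above: handleResponse receives the headers plus the first content buffer, handleChunk each subsequent HTTP chunk, and done() seals the final object. Returning ClientResponse.finished(obj, false) from handleResponse or handleChunk is what would trigger the read suspension implemented by NettyHttpClient below; this sketch never does, so it exerts no backpressure.

import java.nio.charset.StandardCharsets;
import org.apache.druid.java.util.http.client.response.ClientResponse;
import org.apache.druid.java.util.http.client.response.HttpResponseHandler;
import org.jboss.netty.handler.codec.http.HttpChunk;
import org.jboss.netty.handler.codec.http.HttpResponse;

public class StringAccumulatingResponseHandler implements HttpResponseHandler<StringBuilder, String> {
    @Override
    public ClientResponse<StringBuilder> handleResponse(HttpResponse response, TrafficCop trafficCop) {
        // "unfinished" signals that more chunks may follow; reading continues by default.
        return ClientResponse.unfinished(new StringBuilder(response.getContent().toString(StandardCharsets.UTF_8)));
    }

    @Override
    public ClientResponse<StringBuilder> handleChunk(ClientResponse<StringBuilder> clientResponse, HttpChunk chunk, long chunkNum) {
        clientResponse.getObj().append(chunk.getContent().toString(StandardCharsets.UTF_8));
        return clientResponse;
    }

    @Override
    public ClientResponse<String> done(ClientResponse<StringBuilder> clientResponse) {
        // Mark the response finished so finishRequest() below can complete the future.
        return ClientResponse.finished(clientResponse.getObj().toString());
    }

    @Override
    public void exceptionCaught(ClientResponse<StringBuilder> clientResponse, Throwable e) {
        // Nothing to clean up; the future returned by HttpClient.go() will carry the exception.
    }
}

A caller would pass an instance to httpClient.go(request, handler, readTimeout), as DirectDruidClient does above, and consume the resulting ListenableFuture<String>.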
Use of org.apache.druid.java.util.http.client.response.ClientResponse in project druid by druid-io.
The class NettyHttpClient, method go:
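go() leases a channel from the connection pool, writes the request, and installs a one-shot upstream handler that translates Netty events (initial response, chunks, disconnects, exceptions) into the HttpResponseHandler callbacks shown above. Whenever a callback returns a ClientResponse with continueReading = false, the channel is made unreadable; the handler's TrafficCop later restores readability once the consumer catches up, using the watermark bookkeeping below.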
@Override
public <Intermediate, Final> ListenableFuture<Final> go(final Request request, final HttpResponseHandler<Intermediate, Final> handler, final Duration requestReadTimeout) {
final HttpMethod method = request.getMethod();
final URL url = request.getUrl();
final Multimap<String, String> headers = request.getHeaders();
final String requestDesc = method + " " + url;
if (log.isDebugEnabled()) {
log.debug("[%s] starting", requestDesc);
}
// Block while acquiring a channel from the pool, then complete the request asynchronously.
final Channel channel;
final String hostKey = getPoolKey(url);
final ResourceContainer<ChannelFuture> channelResourceContainer = pool.take(hostKey);
final ChannelFuture channelFuture = channelResourceContainer.get().awaitUninterruptibly();
if (!channelFuture.isSuccess()) {
// Some other poor sap will have to deal with it...
channelResourceContainer.returnResource();
return Futures.immediateFailedFuture(new ChannelException("Faulty channel in resource pool", channelFuture.getCause()));
} else {
channel = channelFuture.getChannel();
// In case we get a channel that never had its readability turned back on.
channel.setReadable(true);
}
final String urlFile = StringUtils.nullToEmptyNonDruidDataString(url.getFile());
final HttpRequest httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, method, urlFile.isEmpty() ? "/" : urlFile);
if (!headers.containsKey(HttpHeaders.Names.HOST)) {
httpRequest.headers().add(HttpHeaders.Names.HOST, getHost(url));
}
// If Accept-Encoding is set in the Request, use that. Otherwise use the default from "compressionCodec".
if (!headers.containsKey(HttpHeaders.Names.ACCEPT_ENCODING)) {
httpRequest.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, compressionCodec.getEncodingString());
}
for (Map.Entry<String, Collection<String>> entry : headers.asMap().entrySet()) {
String key = entry.getKey();
for (String obj : entry.getValue()) {
httpRequest.headers().add(key, obj);
}
}
if (request.hasContent()) {
httpRequest.setContent(request.getContent());
}
final long readTimeout = getReadTimeout(requestReadTimeout);
final SettableFuture<Final> retVal = SettableFuture.create();
if (readTimeout > 0) {
channel.getPipeline().addLast(READ_TIMEOUT_HANDLER_NAME, new ReadTimeoutHandler(timer, readTimeout, TimeUnit.MILLISECONDS));
}
channel.getPipeline().addLast(LAST_HANDLER_NAME, new SimpleChannelUpstreamHandler() {
private volatile ClientResponse<Intermediate> response = null;
// Chunk number most recently assigned.
private long currentChunkNum = 0;
// Suspend and resume watermarks (respectively: last chunk number that triggered a suspend, and that was
// provided to the TrafficCop's resume method). Synchronized access since they are not always accessed
// from an I/O thread. (TrafficCops can be called from any thread.)
private final Object watermarkLock = new Object();
private long suspendWatermark = -1;
private long resumeWatermark = -1;
// Nanotime at which reads were last suspended; used to compute the backpressure duration returned by the TrafficCop.
private long backPressureStartTimeNs = 0;
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
if (log.isDebugEnabled()) {
log.debug("[%s] messageReceived: %s", requestDesc, e.getMessage());
}
try {
Object msg = e.getMessage();
if (msg instanceof HttpResponse) {
HttpResponse httpResponse = (HttpResponse) msg;
if (log.isDebugEnabled()) {
log.debug("[%s] Got response: %s", requestDesc, httpResponse.getStatus());
}
HttpResponseHandler.TrafficCop trafficCop = resumeChunkNum -> {
synchronized (watermarkLock) {
resumeWatermark = Math.max(resumeWatermark, resumeChunkNum);
if (suspendWatermark >= 0 && resumeWatermark >= suspendWatermark) {
suspendWatermark = -1;
channel.setReadable(true);
long backPressureDuration = System.nanoTime() - backPressureStartTimeNs;
log.debug("[%s] Resumed reads from channel (chunkNum = %,d).", requestDesc, resumeChunkNum);
return backPressureDuration;
}
}
// If we didn't resume, we don't know whether backpressure was actually happening, so report no suspended time.
return 0;
};
response = handler.handleResponse(httpResponse, trafficCop);
if (response.isFinished()) {
retVal.set((Final) response.getObj());
}
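// handleResponse corresponds to chunk 0 (see enqueue(..., 0L) above); handleChunk numbers later chunks from 1.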
assert currentChunkNum == 0;
possiblySuspendReads(response);
if (!httpResponse.isChunked()) {
finishRequest();
}
} else if (msg instanceof HttpChunk) {
HttpChunk httpChunk = (HttpChunk) msg;
if (log.isDebugEnabled()) {
log.debug("[%s] Got chunk: %sB, last=%s", requestDesc, httpChunk.getContent().readableBytes(), httpChunk.isLast());
}
if (httpChunk.isLast()) {
finishRequest();
} else {
response = handler.handleChunk(response, httpChunk, ++currentChunkNum);
if (response.isFinished() && !retVal.isDone()) {
retVal.set((Final) response.getObj());
}
possiblySuspendReads(response);
}
} else {
throw new ISE("Unknown message type[%s]", msg.getClass());
}
} catch (Exception ex) {
log.warn(ex, "[%s] Exception thrown while processing message, closing channel.", requestDesc);
if (!retVal.isDone()) {
retVal.set(null);
}
channel.close();
channelResourceContainer.returnResource();
throw ex;
}
}
private void possiblySuspendReads(ClientResponse<?> response) {
if (!response.isContinueReading()) {
synchronized (watermarkLock) {
suspendWatermark = Math.max(suspendWatermark, currentChunkNum);
if (suspendWatermark > resumeWatermark) {
channel.setReadable(false);
backPressureStartTimeNs = System.nanoTime();
log.debug("[%s] Suspended reads from channel (chunkNum = %,d).", requestDesc, currentChunkNum);
}
}
}
}
private void finishRequest() {
ClientResponse<Final> finalResponse = handler.done(response);
if (!finalResponse.isFinished() || !finalResponse.isContinueReading()) {
throw new ISE("[%s] Didn't get a completed ClientResponse Object from [%s] (finished = %s, continueReading = %s)", requestDesc, handler.getClass(), finalResponse.isFinished(), finalResponse.isContinueReading());
}
if (!retVal.isDone()) {
retVal.set(finalResponse.getObj());
}
removeHandlers();
channel.setReadable(true);
channelResourceContainer.returnResource();
}
@Override
public void exceptionCaught(ChannelHandlerContext context, ExceptionEvent event) {
if (log.isDebugEnabled()) {
final Throwable cause = event.getCause();
if (cause == null) {
log.debug("[%s] Caught exception", requestDesc);
} else {
log.debug(cause, "[%s] Caught exception", requestDesc);
}
}
retVal.setException(event.getCause());
// response is non-null if we received the initial response before the exception occurred
if (response != null) {
handler.exceptionCaught(response, event.getCause());
}
try {
if (channel.isOpen()) {
channel.close();
}
} catch (Exception e) {
log.warn(e, "Error while closing channel");
} finally {
channelResourceContainer.returnResource();
}
}
@Override
public void channelDisconnected(ChannelHandlerContext context, ChannelStateEvent event) {
if (log.isDebugEnabled()) {
log.debug("[%s] Channel disconnected", requestDesc);
}
// response is non-null if we received the initial response before the channel disconnected
if (response != null) {
handler.exceptionCaught(response, new ChannelException("Channel disconnected"));
}
channel.close();
channelResourceContainer.returnResource();
if (!retVal.isDone()) {
log.warn("[%s] Channel disconnected before response complete", requestDesc);
retVal.setException(new ChannelException("Channel disconnected"));
}
}
private void removeHandlers() {
if (readTimeout > 0) {
channel.getPipeline().remove(READ_TIMEOUT_HANDLER_NAME);
}
channel.getPipeline().remove(LAST_HANDLER_NAME);
}
});
channel.write(httpRequest).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
if (!future.isSuccess()) {
channel.close();
channelResourceContainer.returnResource();
if (!retVal.isDone()) {
retVal.setException(new ChannelException(StringUtils.format("[%s] Failed to write request to channel", requestDesc), future.getCause()));
}
}
}
});
return retVal;
}
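The suspend/resume handshake above is subtle because suspension happens on the Netty I/O thread while the TrafficCop may fire from any consumer thread, so both watermarks must be read and written under one lock. Below is a standalone sketch of just that handshake, with the channel replaced by a plain flag; the class and method names (WatermarkGate, suspend, resume) are hypothetical, not Druid API.

public class WatermarkGate {
    private final Object lock = new Object();
    // Last chunk number that triggered a suspend, and highest chunk number the consumer has released.
    private long suspendWatermark = -1;
    private long resumeWatermark = -1;
    private volatile boolean readable = true;

    // Called from the producer side when a handler returns continueReading = false.
    public void suspend(long chunkNum) {
        synchronized (lock) {
            suspendWatermark = Math.max(suspendWatermark, chunkNum);
            if (suspendWatermark > resumeWatermark) {
                readable = false;
            }
        }
    }

    // Called from any consumer thread (the TrafficCop role); returns true if reads actually resumed.
    public boolean resume(long chunkNum) {
        synchronized (lock) {
            resumeWatermark = Math.max(resumeWatermark, chunkNum);
            if (suspendWatermark >= 0 && resumeWatermark >= suspendWatermark) {
                suspendWatermark = -1;
                readable = true;
                return true;
            }
            return false;
        }
    }

    public boolean isReadable() {
        return readable;
    }
}

Comparing watermarks instead of toggling a boolean makes a stale resume(n) for an already-superseded suspend a no-op, which is why NettyHttpClient tolerates resume calls arriving late or out of order.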