use of org.jboss.netty.handler.codec.http.HttpChunk in project druid by druid-io.
the class DirectDruidClient method run.
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
boolean isBySegment = BaseQuery.getContextBySegment(query, false);
Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
if (types == null) {
final TypeFactory typeFactory = objectMapper.getTypeFactory();
JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
JavaType bySegmentType = typeFactory.constructParametricType(Result.class, typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType));
types = Pair.of(baseType, bySegmentType);
typesMap.put(query.getClass(), types);
}
final JavaType typeRef;
if (isBySegment) {
typeRef = types.rhs;
} else {
typeRef = types.lhs;
}
final ListenableFuture<InputStream> future;
final String url = String.format("http://%s/druid/v2/", host);
final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
try {
log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
final long requestStartTime = System.currentTimeMillis();
final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
builder.setDimension("server", host);
final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
private long responseStartTime;
private final AtomicLong byteCount = new AtomicLong(0);
private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
private final AtomicBoolean done = new AtomicBoolean(false);
@Override
public ClientResponse<InputStream> handleResponse(HttpResponse response) {
log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
responseStartTime = System.currentTimeMillis();
emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
try {
final String responseContext = response.headers().get("X-Druid-Response-Context");
// context may be null in case of error or query timeout
if (responseContext != null) {
context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
}));
}
queue.put(new ChannelBufferInputStream(response.getContent()));
} catch (final IOException e) {
log.error(e, "Error parsing response context from url [%s]", url);
return ClientResponse.<InputStream>finished(new InputStream() {
@Override
public int read() throws IOException {
throw e;
}
});
} catch (InterruptedException e) {
log.error(e, "Queue appending interrupted");
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
byteCount.addAndGet(response.getContent().readableBytes());
return ClientResponse.<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {
@Override
public boolean hasMoreElements() {
// The queue keeps yielding InputStreams until done is set and the queue has drained.
synchronized (done) {
return !done.get() || !queue.isEmpty();
}
}
@Override
public InputStream nextElement() {
try {
return queue.take();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
}
}));
}
@Override
public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
final ChannelBuffer channelBuffer = chunk.getContent();
final int bytes = channelBuffer.readableBytes();
if (bytes > 0) {
try {
queue.put(new ChannelBufferInputStream(channelBuffer));
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
byteCount.addAndGet(bytes);
}
return clientResponse;
}
@Override
public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
long stopTime = System.currentTimeMillis();
log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, byteCount.get(), stopTime - responseStartTime, byteCount.get() / (0.0001 * (stopTime - responseStartTime)));
emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
emitter.emit(builder.build("query/node/bytes", byteCount.get()));
synchronized (done) {
try {
// An empty stream is put at the end to give SequenceInputStream.close() something to close out
// after done is set to true, regardless of the rest of the stream's state.
queue.put(ByteSource.empty().openStream());
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
} catch (IOException e) {
// This should never happen
throw Throwables.propagate(e);
} finally {
done.set(true);
}
}
return ClientResponse.<InputStream>finished(clientResponse.getObj());
}
@Override
public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
// Don't wait for lock in case the lock had something to do with the error
synchronized (done) {
done.set(true);
// Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
// If nothing is waiting on take(), this will be closed out anyways.
queue.offer(new InputStream() {
@Override
public int read() throws IOException {
throw new IOException(e);
}
});
}
}
};
future = httpClient.go(new Request(HttpMethod.POST, new URL(url)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), responseHandler);
queryWatcher.registerQuery(query, future);
openConnections.getAndIncrement();
Futures.addCallback(future, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream result) {
openConnections.getAndDecrement();
}
@Override
public void onFailure(Throwable t) {
openConnections.getAndDecrement();
if (future.isCancelled()) {
// forward the cancellation to underlying queriable node
try {
StatusResponseHolder res = httpClient.go(new Request(HttpMethod.DELETE, new URL(cancelUrl)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), new StatusResponseHandler(Charsets.UTF_8)).get();
if (res.getStatus().getCode() >= 500) {
throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].", res.getStatus().getCode(), res.getStatus().getReasonPhrase());
}
} catch (IOException | ExecutionException | InterruptedException e) {
Throwables.propagate(e);
}
}
}
});
} catch (IOException e) {
throw Throwables.propagate(e);
}
Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
@Override
public JsonParserIterator<T> make() {
return new JsonParserIterator<T>(typeRef, future, url);
}
@Override
public void cleanup(JsonParserIterator<T> iterFromMake) {
CloseQuietly.close(iterFromMake);
}
});
// avoid the cost of de-serializing and then re-serializing again when adding to cache
if (!isBySegment) {
retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
}
return retVal;
}
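The handler above streams each chunk into a BlockingQueue and exposes the whole response body as a single SequenceInputStream, with an empty sentinel stream marking the end. A minimal, Netty-free sketch of that producer/consumer pattern (hypothetical class and method names, plain java.io and java.util.concurrent only, not Druid's actual code):
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.util.Enumeration;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class ChunkStreamSketch {
    private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
    private final AtomicBoolean done = new AtomicBoolean(false);

    // Called once per received chunk (the role of handleChunk above).
    public void onChunk(byte[] bytes) throws InterruptedException {
        if (bytes.length > 0) {
            queue.put(new ByteArrayInputStream(bytes));
        }
    }

    // Called when the response is complete (the role of done above).
    public void onDone() throws InterruptedException {
        synchronized (done) {
            // Sentinel empty stream so a blocked take() wakes up and hasMoreElements() can then return false.
            queue.put(new ByteArrayInputStream(new byte[0]));
            done.set(true);
        }
    }

    // Consumer side: one InputStream that concatenates every queued chunk in order.
    public InputStream asInputStream() {
        return new SequenceInputStream(new Enumeration<InputStream>() {
            @Override
            public boolean hasMoreElements() {
                synchronized (done) {
                    return !done.get() || !queue.isEmpty();
                }
            }

            @Override
            public InputStream nextElement() {
                try {
                    return queue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }
        });
    }
}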
use of org.jboss.netty.handler.codec.http.HttpChunk in project camel by apache.
the class HttpClientChannelHandler method messageReceived.
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent messageEvent) throws Exception {
// store response, as this channel handler is created per pipeline
Object msg = messageEvent.getMessage();
// it may be a chunked message
if (msg instanceof HttpChunk) {
HttpChunk chunk = (HttpChunk) msg;
if (LOG.isTraceEnabled()) {
LOG.trace("HttpChunk received: {} isLast: {}", chunk, chunk.isLast());
}
if (msg instanceof HttpChunkTrailer) {
// chunk trailer only has headers
HttpChunkTrailer trailer = (HttpChunkTrailer) msg;
for (Map.Entry<String, String> entry : trailer.trailingHeaders()) {
if (LOG.isTraceEnabled()) {
LOG.trace("Adding trailing header {}={}", entry.getKey(), entry.getValue());
}
response.headers().add(entry.getKey(), entry.getValue());
}
} else {
// append chunked content
buffer.writeBytes(chunk.getContent());
if (LOG.isTraceEnabled()) {
LOG.trace("Wrote {} bytes to chunk buffer", buffer.writerIndex());
}
}
if (chunk.isLast()) {
// the content is a copy of the buffer with the actual data we wrote to it
int end = buffer.writerIndex();
ChannelBuffer copy = buffer.copy(0, end);
// the copy must not be readable when the content was chunked, so set the index to the end
copy.setIndex(end, end);
response.setContent(copy);
// we have all the content now, so call super to process the received message
super.messageReceived(ctx, messageEvent);
}
} else if (msg instanceof HttpResponse) {
response = (HttpResponse) msg;
Exchange exchange = super.getExchange(ctx);
if (!HttpHeaders.isKeepAlive(response)) {
// just want to make sure we close the channel if the keepAlive is not true
exchange.setProperty(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, true);
}
if (LOG.isTraceEnabled()) {
LOG.trace("HttpResponse received: {} chunked:", response, response.isChunked());
}
if (response.getStatus().getCode() == HttpResponseStatus.CONTINUE.getCode()) {
if (LOG.isTraceEnabled()) {
LOG.trace("HttpResponse received: {}: {}", response, response.getStatus());
}
} else if (!response.isChunked()) {
// the response is not chunked so we have all the content
super.messageReceived(ctx, messageEvent);
} else {
// the response is chunked so use a dynamic buffer to receive the content in chunks
buffer = ChannelBuffers.dynamicBuffer();
}
} else {
// ignore not supported message
if (LOG.isTraceEnabled() && msg != null) {
LOG.trace("Ignoring non supported response message of type {} -> {}", msg.getClass(), msg);
}
}
}
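At its core the handler above accumulates chunk bytes in a dynamic buffer and hands the complete body onward once isLast() fires; trailer headers are merged separately. A stripped-down sketch of just that accumulation step, using the same Netty 3.x buffer calls shown above (class and callback names are hypothetical, and trailer handling is omitted):
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.handler.codec.http.HttpChunk;

public class ChunkAccumulatorSketch {
    private final ChannelBuffer buffer = ChannelBuffers.dynamicBuffer();

    // Feed every HttpChunk received from the pipeline into this method.
    public void onChunk(HttpChunk chunk) {
        // Append this chunk's bytes to the running body (a trailer contributes zero bytes).
        buffer.writeBytes(chunk.getContent());
        if (chunk.isLast()) {
            // Snapshot the accumulated bytes; as in the handler above, mark the copy
            // fully read before handing it on.
            int end = buffer.writerIndex();
            ChannelBuffer body = buffer.copy(0, end);
            body.setIndex(end, end);
            onComplete(body);
        }
    }

    // Placeholder for whatever happens once the full body is available,
    // e.g. response.setContent(body) as in the Camel handler above.
    protected void onComplete(ChannelBuffer body) {
    }
}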
use of org.jboss.netty.handler.codec.http.HttpChunk in project cdap by caskdata.
the class AuthenticationChannelHandler method messageReceived.
/**
* Decode the AccessTokenIdentifier passed as a header and set it in a ThreadLocal.
* Returns a 401 if the identifier is malformed.
*/
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
Object message = e.getMessage();
if (message instanceof HttpRequest) {
// TODO: authenticate the user using user id - CDAP-688
HttpRequest request = (HttpRequest) message;
currentUserId = request.getHeader(Constants.Security.Headers.USER_ID);
currentUserIP = request.getHeader(Constants.Security.Headers.USER_IP);
SecurityRequestContext.setUserId(currentUserId);
SecurityRequestContext.setUserIP(currentUserIP);
} else if (message instanceof HttpChunk) {
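// subsequent chunks of the same request arrive as separate events, so re-apply the identity captured from the HttpRequest for each one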
SecurityRequestContext.setUserId(currentUserId);
SecurityRequestContext.setUserIP(currentUserIP);
}
super.messageReceived(ctx, e);
}
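SecurityRequestContext here behaves like a ThreadLocal holder, which is why the user id and IP captured from the HttpRequest are re-applied when a later HttpChunk is handled. A minimal sketch of such a holder (a hypothetical class, not CDAP's actual implementation):
public final class RequestContextSketch {
    private static final ThreadLocal<String> USER_ID = new ThreadLocal<>();
    private static final ThreadLocal<String> USER_IP = new ThreadLocal<>();

    public static void setUserId(String userId) { USER_ID.set(userId); }
    public static void setUserIP(String userIp) { USER_IP.set(userIp); }

    public static String getUserId() { return USER_ID.get(); }
    public static String getUserIP() { return USER_IP.get(); }

    // Clear after the request completes so a pooled worker thread does not leak identity.
    public static void clear() {
        USER_ID.remove();
        USER_IP.remove();
    }
}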
use of org.jboss.netty.handler.codec.http.HttpChunk in project vcell by virtualcell.
the class HttpResponseHandler method messageReceived.
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
if (clientTaskStatusSupport != null && clientTaskStatusSupport.isInterrupted()) {
ctx.getChannel().close();
return;
}
if (!readingChunks) {
HttpResponse response = (HttpResponse) e.getMessage();
if (!response.getHeaderNames().isEmpty()) {
if (clientTaskStatusSupport != null) {
clientTaskStatusSupport.setMessage("downloading " + NumberUtils.formatNumBytes(contentLength) + " from " + serverHost);
clientTaskStatusSupport.setProgress(0);
} else {
System.out.println("downloading " + contentLength + " bytes from " + serverHost);
}
}
if (response.isChunked()) {
readingChunks = true;
} else {
ChannelBuffer content = response.getContent();
if (content.readable()) {
responseContent.append(content.toString(CharsetUtil.UTF_8));
if (clientTaskStatusSupport != null) {
clientTaskStatusSupport.setProgress(100);
}
}
}
} else {
HttpChunk chunk = (HttpChunk) e.getMessage();
if (chunk.isLast()) {
readingChunks = false;
} else {
responseContent.append(chunk.getContent().toString(CharsetUtil.UTF_8));
if (clientTaskStatusSupport != null) {
clientTaskStatusSupport.setMessage("downloaded " + NumberUtils.formatNumBytes(responseContent.length()) + " from " + serverHost);
} else {
System.out.println("downloaded " + responseContent.length() + " of " + NumberUtils.formatNumBytes(responseContent.length()) + " from " + serverHost);
}
}
}
}
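The handler above decodes each chunk as UTF-8, appends it to responseContent, and reports progress as data arrives. A minimal sketch of that accumulation with a percentage derived from a known content length (hypothetical names; note that decoding each chunk separately can split multi-byte UTF-8 sequences, a caveat the original shares):
import java.nio.charset.StandardCharsets;

public class DownloadProgressSketch {
    private final StringBuilder responseContent = new StringBuilder();
    private final long contentLength; // from the Content-Length header, if known
    private long bytesReceived;

    public DownloadProgressSketch(long contentLength) {
        this.contentLength = contentLength;
    }

    // Append one chunk's bytes and return the rough percentage downloaded (-1 if the total size is unknown).
    public int onChunk(byte[] chunkBytes) {
        bytesReceived += chunkBytes.length;
        responseContent.append(new String(chunkBytes, StandardCharsets.UTF_8));
        if (contentLength <= 0) {
            return -1;
        }
        return (int) Math.min(100, (100L * bytesReceived) / contentLength);
    }

    public String body() {
        return responseContent.toString();
    }
}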
use of org.jboss.netty.handler.codec.http.HttpChunk in project databus by linkedin.
the class FooterAwareHttpChunkAggregator method messageReceived.
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
Object msg = e.getMessage();
HttpMessage currentMessage = this.currentMessage;
if (msg instanceof HttpMessage) {
HttpMessage m = (HttpMessage) msg;
if (m.isChunked()) {
// A chunked message - remove 'Transfer-Encoding' header,
// initialize the cumulative buffer, and wait for incoming chunks.
List<String> encodings = m.getHeaders(HttpHeaders.Names.TRANSFER_ENCODING);
encodings.remove(HttpHeaders.Values.CHUNKED);
if (encodings.isEmpty()) {
m.removeHeader(HttpHeaders.Names.TRANSFER_ENCODING);
}
m.setContent(ChannelBuffers.dynamicBuffer(e.getChannel().getConfig().getBufferFactory()));
this.currentMessage = m;
} else {
// Not a chunked message - pass through.
this.currentMessage = null;
ctx.sendUpstream(e);
}
} else if (msg instanceof HttpChunk) {
// Sanity check
if (currentMessage == null) {
throw new IllegalStateException("received " + HttpChunk.class.getSimpleName() + " without " + HttpMessage.class.getSimpleName());
}
// Merge the received chunk into the content of the current message.
HttpChunk chunk = (HttpChunk) msg;
ChannelBuffer content = currentMessage.getContent();
if (content.readableBytes() > maxContentLength - chunk.getContent().readableBytes()) {
throw new TooLongFrameException("HTTP content length exceeded " + maxContentLength + " bytes.");
}
content.writeBytes(chunk.getContent());
if (chunk.isLast()) {
this.currentMessage = null;
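// the field is cleared above; the local currentMessage still references the aggregated message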
currentMessage.setHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(content.readableBytes()));
if (chunk instanceof HttpChunkTrailer) {
HttpChunkTrailer chunkTrailer = (HttpChunkTrailer) chunk;
for (Entry<String, String> footer : chunkTrailer.getHeaders()) {
currentMessage.setHeader(footer.getKey(), footer.getValue());
}
}
Channels.fireMessageReceived(ctx, currentMessage, e.getRemoteAddress());
}
} else {
// Neither HttpMessage nor HttpChunk
ctx.sendUpstream(e);
}
}
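This aggregator is meant to sit in the client pipeline between the HTTP codec and downstream handlers, so that later handlers see one complete HttpMessage with trailer ("footer") headers merged in rather than a stream of HttpChunks. A hypothetical wiring sketch, assuming FooterAwareHttpChunkAggregator takes a maxContentLength argument like Netty's own HttpChunkAggregator (its import is omitted since its package is not shown in this excerpt):
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.http.HttpClientCodec;

public class AggregatorPipelineSketch {
    // Hypothetical wiring; the maxContentLength constructor is an assumption
    // mirroring Netty's HttpChunkAggregator.
    public static ChannelPipeline buildPipeline() {
        ChannelPipeline pipeline = Channels.pipeline();
        pipeline.addLast("codec", new HttpClientCodec());
        pipeline.addLast("aggregator", new FooterAwareHttpChunkAggregator(1024 * 1024));
        // Handlers added after this point receive one complete HttpMessage per response,
        // with trailer headers merged in, instead of a stream of HttpChunks.
        return pipeline;
    }
}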