Example 51 with HttpResponse

Use of org.jboss.netty.handler.codec.http.HttpResponse in project hadoop by apache.

Class TestShuffleHandler, method testGetMapOutputInfo.

@Test(timeout = 100000)
public void testGetMapOutputInfo() throws Exception {
    final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
    UserGroupInformation.setConfiguration(conf);
    File absLogDir = new File("target", TestShuffleHandler.class.getSimpleName() + "LocDir").getAbsoluteFile();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
    ApplicationId appId = ApplicationId.newInstance(12345, 1);
    String appAttemptId = "attempt_12345_1_m_1_0";
    String user = "randomUser";
    String reducerId = "0";
    List<File> fileMap = new ArrayList<File>();
    createShuffleHandlerFiles(absLogDir, user, appId.toString(), appAttemptId, conf, fileMap);
    ShuffleHandler shuffleHandler = new ShuffleHandler() {

        @Override
        protected Shuffle getShuffle(Configuration conf) {
            // replace the shuffle handler with one stubbed for testing
            return new Shuffle(conf) {

                @Override
                protected void populateHeaders(List<String> mapIds, String outputBaseStr, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map<String, MapOutputInfo> infoMap) throws IOException {
                    // Only set response headers and skip everything else
                    // send some dummy value for content-length
                    super.setResponseHeaders(response, keepAliveParam, 100);
                }

                @Override
                protected void verifyRequest(String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
                // Do nothing.
                }

                @Override
                protected void sendError(ChannelHandlerContext ctx, String message, HttpResponseStatus status) {
                    if (failures.size() == 0) {
                        failures.add(new Error(message));
                        ctx.getChannel().close();
                    }
                }

                @Override
                protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
                    // send a shuffle header
                    ShuffleHeader header = new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
                    DataOutputBuffer dob = new DataOutputBuffer();
                    header.write(dob);
                    return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
                }
            };
        }
    };
    shuffleHandler.init(conf);
    try {
        shuffleHandler.start();
        DataOutputBuffer outputBuffer = new DataOutputBuffer();
        outputBuffer.reset();
        Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text(user), new Text("shuffleService"));
        jt.write(outputBuffer);
        shuffleHandler.initializeApplication(new ApplicationInitializationContext(user, appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
        URL url = new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_0001&reduce=" + reducerId + "&map=attempt_12345_1_m_1_0");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
        conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION, ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
        conn.connect();
        try {
            DataInputStream is = new DataInputStream(conn.getInputStream());
            ShuffleHeader header = new ShuffleHeader();
            header.readFields(is);
            is.close();
        } catch (EOFException e) {
        // ignore
        }
        Assert.assertEquals("sendError called due to shuffle error", 0, failures.size());
    } finally {
        shuffleHandler.stop();
        FileUtil.fullyDelete(absLogDir);
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration), HttpResponseStatus(org.jboss.netty.handler.codec.http.HttpResponseStatus), ArrayList(java.util.ArrayList), ChannelHandlerContext(org.jboss.netty.channel.ChannelHandlerContext), Token(org.apache.hadoop.security.token.Token), URL(java.net.URL), HttpURLConnection(java.net.HttpURLConnection), DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer), EOFException(java.io.EOFException), List(java.util.List), ApplicationInitializationContext(org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), HttpRequest(org.jboss.netty.handler.codec.http.HttpRequest), SocketChannel(org.jboss.netty.channel.socket.SocketChannel), Channel(org.jboss.netty.channel.Channel), AbstractChannel(org.jboss.netty.channel.AbstractChannel), ShuffleHeader(org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader), DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse), HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse), JobTokenIdentifier(org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier), Text(org.apache.hadoop.io.Text), DataInputStream(java.io.DataInputStream), ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId), File(java.io.File), Map(java.util.Map), Test(org.junit.Test)
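
The stub above reduces populateHeaders to a single call, super.setResponseHeaders(response, keepAliveParam, 100). On the Netty 3.x HttpResponse used here, such a helper boils down to setting a Content-Length and, for keep-alive, a Connection header. A minimal hedged sketch (the helper name mirrors the call in the test; the body is an assumption, not the actual ShuffleHandler code):

import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpResponse;

// Hypothetical sketch of a setResponseHeaders-style helper on a Netty 3.x
// HttpResponse; not the actual ShuffleHandler implementation.
final class ResponseHeaderSketch {
    static void setResponseHeaders(HttpResponse response, boolean keepAlive, long contentLength) {
        // The test above passes a dummy content length of 100.
        response.headers().set(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(contentLength));
        if (keepAlive) {
            // Advertise connection reuse to the shuffle fetcher.
            response.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
        }
    }
}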

Example 52 with HttpResponse

Use of org.jboss.netty.handler.codec.http.HttpResponse in project hadoop by apache.

Class TestShuffleHandler, method testClientClosesConnection.

/**
   * Verify the server's behavior when a client prematurely closes a connection.
   *
   * @throws Exception exception.
   */
@Test(timeout = 10000)
public void testClientClosesConnection() throws Exception {
    final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    ShuffleHandler shuffleHandler = new ShuffleHandler() {

        @Override
        protected Shuffle getShuffle(Configuration conf) {
            // replace the shuffle handler with one stubbed for testing
            return new Shuffle(conf) {

                @Override
                protected MapOutputInfo getMapOutputInfo(String mapId, int reduce, String jobId, String user) throws IOException {
                    return null;
                }

                @Override
                protected void populateHeaders(List<String> mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map<String, MapOutputInfo> infoMap) throws IOException {
                    // Only set response headers and skip everything else
                    // send some dummy value for content-length
                    super.setResponseHeaders(response, keepAliveParam, 100);
                }

                @Override
                protected void verifyRequest(String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
                }

                @Override
                protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
                    // send a shuffle header and a lot of data down the channel
                    // to trigger a broken pipe
                    ShuffleHeader header = new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
                    DataOutputBuffer dob = new DataOutputBuffer();
                    header.write(dob);
                    ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
                    dob = new DataOutputBuffer();
                    for (int i = 0; i < 100000; ++i) {
                        header.write(dob);
                    }
                    return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
                }

                @Override
                protected void sendError(ChannelHandlerContext ctx, HttpResponseStatus status) {
                    if (failures.size() == 0) {
                        failures.add(new Error());
                        ctx.getChannel().close();
                    }
                }

                @Override
                protected void sendError(ChannelHandlerContext ctx, String message, HttpResponseStatus status) {
                    if (failures.size() == 0) {
                        failures.add(new Error());
                        ctx.getChannel().close();
                    }
                }
            };
        }
    };
    shuffleHandler.init(conf);
    shuffleHandler.start();
    // simulate a reducer that closes early by reading a single shuffle header
    // then closing the connection
    URL url = new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
    conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION, ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
    conn.connect();
    DataInputStream input = new DataInputStream(conn.getInputStream());
    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
    Assert.assertEquals("close", conn.getHeaderField(HttpHeader.CONNECTION.asString()));
    ShuffleHeader header = new ShuffleHeader();
    header.readFields(input);
    input.close();
    shuffleHandler.stop();
    Assert.assertTrue("sendError called when client closed connection", failures.size() == 0);
}
Also used: HttpRequest(org.jboss.netty.handler.codec.http.HttpRequest), Configuration(org.apache.hadoop.conf.Configuration), YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration), HttpResponseStatus(org.jboss.netty.handler.codec.http.HttpResponseStatus), SocketChannel(org.jboss.netty.channel.socket.SocketChannel), Channel(org.jboss.netty.channel.Channel), AbstractChannel(org.jboss.netty.channel.AbstractChannel), ShuffleHeader(org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader), ArrayList(java.util.ArrayList), DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse), HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse), ChannelHandlerContext(org.jboss.netty.channel.ChannelHandlerContext), DataInputStream(java.io.DataInputStream), URL(java.net.URL), HttpURLConnection(java.net.HttpURLConnection), DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer), List(java.util.List), Map(java.util.Map), Test(org.junit.Test)
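
Example 52 asserts that sendError is not invoked when the client disconnects mid-stream. In Netty 3.x, the usual way a server notices such a disconnect is through a write-completion listener; a hedged sketch of that pattern (a generic illustration, not taken from ShuffleHandler):

import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;

// Sketch: react to a failed write (e.g. a broken pipe from an early client
// close) by closing the channel instead of attempting an HTTP error response.
final class WriteWatchSketch {
    static void writeAndWatch(Channel ch, Object msg) {
        ChannelFuture future = ch.write(msg);
        future.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture f) {
                if (!f.isSuccess()) {
                    // The peer is gone; an error response could not be delivered anyway.
                    f.getChannel().close();
                }
            }
        });
    }
}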

Example 53 with HttpResponse

Use of org.jboss.netty.handler.codec.http.HttpResponse in project hadoop by apache.

Class TestShuffleHandler, method testMapFileAccess.

/**
   * Validate the ownership of the map-output files being pulled in. The
   * local-file-system owner of each file should match the user the shuffle
   * data belongs to.
   *
   * @throws Exception exception
   */
@Test(timeout = 100000)
public void testMapFileAccess() throws IOException {
    // This will run only if NativeIO is enabled, as SecureIOUtils needs it
    assumeTrue(NativeIO.isAvailable());
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    File absLogDir = new File("target", TestShuffleHandler.class.getSimpleName() + "LocDir").getAbsoluteFile();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
    ApplicationId appId = ApplicationId.newInstance(12345, 1);
    LOG.info(appId.toString());
    String appAttemptId = "attempt_12345_1_m_1_0";
    String user = "randomUser";
    String reducerId = "0";
    List<File> fileMap = new ArrayList<File>();
    createShuffleHandlerFiles(absLogDir, user, appId.toString(), appAttemptId, conf, fileMap);
    ShuffleHandler shuffleHandler = new ShuffleHandler() {

        @Override
        protected Shuffle getShuffle(Configuration conf) {
            // replace the shuffle handler with one stubbed for testing
            return new Shuffle(conf) {

                @Override
                protected void verifyRequest(String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
                // Do nothing.
                }
            };
        }
    };
    shuffleHandler.init(conf);
    try {
        shuffleHandler.start();
        DataOutputBuffer outputBuffer = new DataOutputBuffer();
        outputBuffer.reset();
        Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text(user), new Text("shuffleService"));
        jt.write(outputBuffer);
        shuffleHandler.initializeApplication(new ApplicationInitializationContext(user, appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
        URL url = new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_0001&reduce=" + reducerId + "&map=attempt_12345_1_m_1_0");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
        conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION, ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
        conn.connect();
        byte[] byteArr = new byte[10000];
        try {
            DataInputStream is = new DataInputStream(conn.getInputStream());
            is.readFully(byteArr);
        } catch (EOFException e) {
        // ignore
        }
        // Retrieve file owner name
        FileInputStream is = new FileInputStream(fileMap.get(0));
        String owner = NativeIO.POSIX.getFstat(is.getFD()).getOwner();
        is.close();
        String message = "Owner '" + owner + "' for path " + fileMap.get(0).getAbsolutePath() + " did not match expected owner '" + user + "'";
        Assert.assertTrue((new String(byteArr)).contains(message));
    } finally {
        shuffleHandler.stop();
        FileUtil.fullyDelete(absLogDir);
    }
}
Also used: HttpRequest(org.jboss.netty.handler.codec.http.HttpRequest), Configuration(org.apache.hadoop.conf.Configuration), YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration), ArrayList(java.util.ArrayList), DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse), HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse), JobTokenIdentifier(org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier), ChannelHandlerContext(org.jboss.netty.channel.ChannelHandlerContext), Token(org.apache.hadoop.security.token.Token), Text(org.apache.hadoop.io.Text), DataInputStream(java.io.DataInputStream), URL(java.net.URL), FileInputStream(java.io.FileInputStream), HttpURLConnection(java.net.HttpURLConnection), DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer), EOFException(java.io.EOFException), ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId), File(java.io.File), ApplicationInitializationContext(org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), Test(org.junit.Test)
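
The owner lookup above goes through NativeIO.POSIX.getFstat on the already-open descriptor, which avoids the check-then-use race a path-based lookup would have. For illustration only, here is a plain-JDK sketch of the same ownership query (a hypothetical, path-based helper; not what SecureIOUtils actually does):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical portable owner lookup. Unlike the fstat-on-FD approach in the
// test, this is path-based and therefore subject to TOCTOU races.
final class OwnerSketch {
    static String fileOwner(String pathName) throws IOException {
        Path path = Paths.get(pathName);
        // Resolves the owning principal through the default file system.
        return Files.getOwner(path).getName();
    }
}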

Example 54 with HttpResponse

Use of org.jboss.netty.handler.codec.http.HttpResponse in project druid by druid-io.

Class DirectDruidClient, method run.

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
    QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    boolean isBySegment = BaseQuery.getContextBySegment(query, false);
    Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
    if (types == null) {
        final TypeFactory typeFactory = objectMapper.getTypeFactory();
        JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
        JavaType bySegmentType = typeFactory.constructParametricType(Result.class, typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType));
        types = Pair.of(baseType, bySegmentType);
        typesMap.put(query.getClass(), types);
    }
    final JavaType typeRef;
    if (isBySegment) {
        typeRef = types.rhs;
    } else {
        typeRef = types.lhs;
    }
    final ListenableFuture<InputStream> future;
    final String url = String.format("http://%s/druid/v2/", host);
    final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
    try {
        log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
        final long requestStartTime = System.currentTimeMillis();
        final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
        builder.setDimension("server", host);
        final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {

            private long responseStartTime;

            private final AtomicLong byteCount = new AtomicLong(0);

            private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();

            private final AtomicBoolean done = new AtomicBoolean(false);

            @Override
            public ClientResponse<InputStream> handleResponse(HttpResponse response) {
                log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
                responseStartTime = System.currentTimeMillis();
                emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
                try {
                    final String responseContext = response.headers().get("X-Druid-Response-Context");
                    // context may be null in case of error or query timeout
                    if (responseContext != null) {
                        context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
                        }));
                    }
                    queue.put(new ChannelBufferInputStream(response.getContent()));
                } catch (final IOException e) {
                    log.error(e, "Error parsing response context from url [%s]", url);
                    return ClientResponse.<InputStream>finished(new InputStream() {

                        @Override
                        public int read() throws IOException {
                            throw e;
                        }
                    });
                } catch (InterruptedException e) {
                    log.error(e, "Queue appending interrupted");
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                byteCount.addAndGet(response.getContent().readableBytes());
                return ClientResponse.<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {

                    @Override
                    public boolean hasMoreElements() {
                        // Keep handing out InputStreams until done is set and the queue has been drained.
                        synchronized (done) {
                            return !done.get() || !queue.isEmpty();
                        }
                    }

                    @Override
                    public InputStream nextElement() {
                        try {
                            return queue.take();
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            throw Throwables.propagate(e);
                        }
                    }
                }));
            }

            @Override
            public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
                final ChannelBuffer channelBuffer = chunk.getContent();
                final int bytes = channelBuffer.readableBytes();
                if (bytes > 0) {
                    try {
                        queue.put(new ChannelBufferInputStream(channelBuffer));
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    }
                    byteCount.addAndGet(bytes);
                }
                return clientResponse;
            }

            @Override
            public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
                long stopTime = System.currentTimeMillis();
                log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, byteCount.get(), stopTime - responseStartTime, byteCount.get() / (0.0001 * (stopTime - responseStartTime)));
                emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
                emitter.emit(builder.build("query/node/bytes", byteCount.get()));
                synchronized (done) {
                    try {
                        // An empty byte array is put at the end to give SequenceInputStream.close() something
                        // to close out after done is set to true, regardless of the rest of the stream's state.
                        queue.put(ByteSource.empty().openStream());
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    } catch (IOException e) {
                        // This should never happen
                        throw Throwables.propagate(e);
                    } finally {
                        done.set(true);
                    }
                }
                return ClientResponse.<InputStream>finished(clientResponse.getObj());
            }

            @Override
            public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
                // Don't wait for lock in case the lock had something to do with the error
                synchronized (done) {
                    done.set(true);
                    // Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
                    // If nothing is waiting on take(), this will be closed out anyways.
                    queue.offer(new InputStream() {

                        @Override
                        public int read() throws IOException {
                            throw new IOException(e);
                        }
                    });
                }
            }
        };
        future = httpClient.go(new Request(HttpMethod.POST, new URL(url)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), responseHandler);
        queryWatcher.registerQuery(query, future);
        openConnections.getAndIncrement();
        Futures.addCallback(future, new FutureCallback<InputStream>() {

            @Override
            public void onSuccess(InputStream result) {
                openConnections.getAndDecrement();
            }

            @Override
            public void onFailure(Throwable t) {
                openConnections.getAndDecrement();
                if (future.isCancelled()) {
                    // forward the cancellation to underlying queriable node
                    try {
                        StatusResponseHolder res = httpClient.go(new Request(HttpMethod.DELETE, new URL(cancelUrl)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), new StatusResponseHandler(Charsets.UTF_8)).get();
                        if (res.getStatus().getCode() >= 500) {
                            throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].", query.getId(), res.getStatus().getCode(), res.getStatus().getReasonPhrase());
                        }
                    } catch (IOException | ExecutionException | InterruptedException e) {
                        Throwables.propagate(e);
                    }
                }
            }
        });
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {

        @Override
        public JsonParserIterator<T> make() {
            return new JsonParserIterator<T>(typeRef, future, url);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
            CloseQuietly.close(iterFromMake);
        }
    });
    // avoid the cost of de-serializing and then re-serializing again when adding to cache
    if (!isBySegment) {
        retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
    }
    return retVal;
}
Also used: ClientResponse(com.metamx.http.client.response.ClientResponse), BaseQuery(io.druid.query.BaseQuery), Query(io.druid.query.Query), QueryInterruptedException(io.druid.query.QueryInterruptedException), URL(java.net.URL), ChannelBuffer(org.jboss.netty.buffer.ChannelBuffer), StatusResponseHolder(com.metamx.http.client.response.StatusResponseHolder), TypeReference(com.fasterxml.jackson.core.type.TypeReference), ChannelBufferInputStream(org.jboss.netty.buffer.ChannelBufferInputStream), BlockingQueue(java.util.concurrent.BlockingQueue), LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue), Enumeration(java.util.Enumeration), SequenceInputStream(java.io.SequenceInputStream), InputStream(java.io.InputStream), Request(com.metamx.http.client.Request), BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass), HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse), IOException(java.io.IOException), BaseSequence(io.druid.java.util.common.guava.BaseSequence), AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean), JavaType(com.fasterxml.jackson.databind.JavaType), AtomicLong(java.util.concurrent.atomic.AtomicLong), RE(io.druid.java.util.common.RE), ServiceMetricEvent(com.metamx.emitter.service.ServiceMetricEvent), StatusResponseHandler(com.metamx.http.client.response.StatusResponseHandler), TypeFactory(com.fasterxml.jackson.databind.type.TypeFactory), HttpResponseHandler(com.metamx.http.client.response.HttpResponseHandler), HttpChunk(org.jboss.netty.handler.codec.http.HttpChunk)
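
The most intricate part of Example 54 is how handleResponse and handleChunk stitch chunked HTTP content into one InputStream: a BlockingQueue of per-chunk streams, drained by a SequenceInputStream over a custom Enumeration. A self-contained sketch of that pattern (single-threaded here for brevity; in DirectDruidClient the producer is the HTTP callback thread):

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.util.Enumeration;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the queue-backed SequenceInputStream pattern used above: producers
// enqueue per-chunk streams; one consumer reads them as a single logical stream.
public class ChunkQueueSketch {
    public static void main(String[] args) throws Exception {
        final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
        final AtomicBoolean done = new AtomicBoolean(false);

        // Producer side first, mirroring handleResponse, which enqueues the initial
        // content before handing the SequenceInputStream back to the caller.
        queue.put(new ByteArrayInputStream("chunk-1 ".getBytes()));
        queue.put(new ByteArrayInputStream("chunk-2".getBytes()));
        done.set(true);
        // Terminal empty stream, as in done(), so the enumeration can finish cleanly.
        queue.put(new ByteArrayInputStream(new byte[0]));

        InputStream combined = new SequenceInputStream(new Enumeration<InputStream>() {
            @Override
            public boolean hasMoreElements() {
                // Keep supplying streams until done is set and the queue is drained.
                return !done.get() || !queue.isEmpty();
            }

            @Override
            public InputStream nextElement() {
                try {
                    return queue.take(); // blocks until the next chunk arrives
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }
        });

        int b;
        while ((b = combined.read()) != -1) {
            System.out.print((char) b);
        }
        combined.close();
    }
}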

Example 55 with HttpResponse

Use of org.jboss.netty.handler.codec.http.HttpResponse in project graylog2-server by Graylog2.

Class GELFHttpHandlerTest, method testWithoutKeepalive.

@Test
public void testWithoutKeepalive() throws Exception {
    when(headers.get(HttpHeaders.Names.CONNECTION)).thenReturn(HttpHeaders.Values.CLOSE);
    HttpTransport.Handler handler = new HttpTransport.Handler(true);
    handler.messageReceived(ctx, evt);
    ArgumentCaptor<HttpResponse> argument = ArgumentCaptor.forClass(HttpResponse.class);
    verify(channel).write(argument.capture());
    verify(ctx, atMost(1)).sendUpstream(any(ChannelEvent.class));
    HttpResponse response = argument.getValue();
    assertEquals(HttpHeaders.Values.CLOSE, response.headers().get(HttpHeaders.Names.CONNECTION));
}
Also used: HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse), ChannelEvent(org.jboss.netty.channel.ChannelEvent), Test(org.junit.Test)

Aggregations

HttpResponse (org.jboss.netty.handler.codec.http.HttpResponse): 143 usages
DefaultHttpResponse (org.jboss.netty.handler.codec.http.DefaultHttpResponse): 111 usages
ChannelBuffer (org.jboss.netty.buffer.ChannelBuffer): 61 usages
HttpChunk (org.jboss.netty.handler.codec.http.HttpChunk): 53 usages
Test (org.testng.annotations.Test): 51 usages
DefaultHttpChunk (org.jboss.netty.handler.codec.http.DefaultHttpChunk): 47 usages
HttpRequest (org.jboss.netty.handler.codec.http.HttpRequest): 37 usages
Channel (org.jboss.netty.channel.Channel): 34 usages
DefaultHttpChunkTrailer (org.jboss.netty.handler.codec.http.DefaultHttpChunkTrailer): 30 usages
HttpChunkTrailer (org.jboss.netty.handler.codec.http.HttpChunkTrailer): 28 usages
BootstrapDatabaseTooOldException (com.linkedin.databus2.core.container.request.BootstrapDatabaseTooOldException): 25 usages
InetSocketAddress (java.net.InetSocketAddress): 25 usages
ChannelFuture (org.jboss.netty.channel.ChannelFuture): 25 usages
DefaultHttpRequest (org.jboss.netty.handler.codec.http.DefaultHttpRequest): 25 usages
Checkpoint (com.linkedin.databus.core.Checkpoint): 24 usages
SocketAddress (java.net.SocketAddress): 23 usages
ConditionCheck (com.linkedin.databus2.test.ConditionCheck): 20 usages
Test (org.junit.Test): 20 usages
Logger (org.apache.log4j.Logger): 16 usages
ChannelPipeline (org.jboss.netty.channel.ChannelPipeline): 16 usages
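
As the counts suggest, HttpResponse nearly always appears alongside DefaultHttpResponse and ChannelBuffer. A minimal sketch of constructing a Netty 3.x response with those types (a standalone illustration, not drawn from any project above):

import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;
import org.jboss.netty.util.CharsetUtil;

// Minimal Netty 3.x response construction: the DefaultHttpResponse + ChannelBuffer
// pairing that dominates the aggregation counts above.
final class ResponseSketch {
    static HttpResponse okTextResponse(String body) {
        HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
        ChannelBuffer content = ChannelBuffers.copiedBuffer(body, CharsetUtil.UTF_8);
        response.setContent(content);
        response.headers().set(HttpHeaders.Names.CONTENT_TYPE, "text/plain; charset=UTF-8");
        response.headers().set(HttpHeaders.Names.CONTENT_LENGTH, content.readableBytes());
        // A handler would then write it, e.g. ctx.getChannel().write(response);
        return response;
    }
}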