Use of com.metamx.http.client.Request in project druid by druid-io.
From the class DirectDruidClientTest, method testQueryInterruptionExceptionLogMessage:
@Test
public void testQueryInterruptionExceptionLogMessage() throws JsonProcessingException {
  HttpClient httpClient = EasyMock.createMock(HttpClient.class);
  SettableFuture<Object> interruptionFuture = SettableFuture.create();
  Capture<Request> capturedRequest = EasyMock.newCapture();
  String hostName = "localhost:8080";
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(interruptionFuture)
          .anyTimes();
  EasyMock.replay(httpClient);
  DataSegment dataSegment = new DataSegment(
      "test",
      new Interval("2013-01-01/2013-01-02"),
      new DateTime("2013-01-01").toString(),
      Maps.<String, Object>newHashMap(),
      Lists.<String>newArrayList(),
      Lists.<String>newArrayList(),
      NoneShardSpec.instance(),
      0,
      0L
  );
  final ServerSelector serverSelector = new ServerSelector(
      dataSegment,
      new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
  );
  DirectDruidClient client1 = new DirectDruidClient(
      new ReflectionQueryToolChestWarehouse(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
      new DefaultObjectMapper(),
      httpClient,
      hostName,
      new NoopServiceEmitter()
  );
  QueryableDruidServer queryableDruidServer = new QueryableDruidServer(
      new DruidServer("test1", hostName, 0, "historical", DruidServer.DEFAULT_TIER, 0),
      client1
  );
  serverSelector.addServerAndUpdateSegment(queryableDruidServer, dataSegment);
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  HashMap<String, List> context = Maps.newHashMap();
  interruptionFuture.set(new ByteArrayInputStream("{\"error\":\"testing1\",\"errorMessage\":\"testing2\"}".getBytes()));
  Sequence results = client1.run(query, context);
  QueryInterruptedException actualException = null;
  try {
    Sequences.toList(results, Lists.newArrayList());
  } catch (QueryInterruptedException e) {
    actualException = e;
  }
  Assert.assertNotNull(actualException);
  Assert.assertEquals("testing1", actualException.getErrorCode());
  Assert.assertEquals("testing2", actualException.getMessage());
  Assert.assertEquals(hostName, actualException.getHost());
  EasyMock.verify(httpClient);
}
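The error body pushed through interruptionFuture is the JSON form of a Druid error response, and the client rebuilds a QueryInterruptedException from it (filling in the host itself). A minimal sketch of that mapping, assuming the Jackson bindings QueryInterruptedException carries in this era of Druid; the wrapper class and package names are illustrative:

import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.QueryInterruptedException;

public class ErrorBodySketch {
  public static void main(String[] args) throws Exception {
    // Deserialize the same error body the test feeds into the future.
    ObjectMapper mapper = new DefaultObjectMapper();
    QueryInterruptedException e = mapper.readValue(
        "{\"error\":\"testing1\",\"errorMessage\":\"testing2\"}",
        QueryInterruptedException.class
    );
    System.out.println(e.getErrorCode()); // testing1
    System.out.println(e.getMessage());   // testing2
    // getHost() is null here; DirectDruidClient sets it from the server it queried.
  }
}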
Use of com.metamx.http.client.Request in project druid by druid-io.
From the class DirectDruidClientTest, method testRun:
@Test
public void testRun() throws Exception {
  HttpClient httpClient = EasyMock.createMock(HttpClient.class);
  final URL url = new URL("http://foo/druid/v2/");
  SettableFuture<InputStream> futureResult = SettableFuture.create();
  Capture<Request> capturedRequest = EasyMock.newCapture();
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(futureResult)
          .times(1);
  SettableFuture futureException = SettableFuture.create();
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(futureException)
          .times(1);
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(SettableFuture.create())
          .atLeastOnce();
  EasyMock.replay(httpClient);
  final ServerSelector serverSelector = new ServerSelector(
      new DataSegment(
          "test",
          new Interval("2013-01-01/2013-01-02"),
          new DateTime("2013-01-01").toString(),
          Maps.<String, Object>newHashMap(),
          Lists.<String>newArrayList(),
          Lists.<String>newArrayList(),
          NoneShardSpec.instance(),
          0,
          0L
      ),
      new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
  );
  DirectDruidClient client1 = new DirectDruidClient(
      new ReflectionQueryToolChestWarehouse(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
      new DefaultObjectMapper(),
      httpClient,
      "foo",
      new NoopServiceEmitter()
  );
  DirectDruidClient client2 = new DirectDruidClient(
      new ReflectionQueryToolChestWarehouse(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
      new DefaultObjectMapper(),
      httpClient,
      "foo2",
      new NoopServiceEmitter()
  );
  QueryableDruidServer queryableDruidServer1 = new QueryableDruidServer(
      new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
      client1
  );
  serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
  QueryableDruidServer queryableDruidServer2 = new QueryableDruidServer(
      new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
      client2
  );
  serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment());
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  HashMap<String, List> context = Maps.newHashMap();
  Sequence s1 = client1.run(query, context);
  Assert.assertTrue(capturedRequest.hasCaptured());
  Assert.assertEquals(url, capturedRequest.getValue().getUrl());
  Assert.assertEquals(HttpMethod.POST, capturedRequest.getValue().getMethod());
  Assert.assertEquals(1, client1.getNumOpenConnections());
  // simulate a read timeout on the second connection
  Sequence s2 = client1.run(query, context);
  Assert.assertEquals(2, client1.getNumOpenConnections());
  futureException.setException(new ReadTimeoutException());
  Assert.assertEquals(1, client1.getNumOpenConnections());
  // subsequent connections should work
  Sequence s3 = client1.run(query, context);
  Sequence s4 = client1.run(query, context);
  Sequence s5 = client1.run(query, context);
  Assert.assertEquals(4, client1.getNumOpenConnections());
  // produce a result for the first connection
  futureResult.set(new ByteArrayInputStream("[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]".getBytes()));
  List<Result> results = Sequences.toList(s1, Lists.<Result>newArrayList());
  Assert.assertEquals(1, results.size());
  Assert.assertEquals(new DateTime("2014-01-01T01:02:03Z"), results.get(0).getTimestamp());
  Assert.assertEquals(3, client1.getNumOpenConnections());
  client2.run(query, context);
  client2.run(query, context);
  Assert.assertEquals(2, client2.getNumOpenConnections());
  Assert.assertSame(queryableDruidServer2, serverSelector.pick());
  EasyMock.verify(httpClient);
}
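The choreography above hinges on SettableFuture: each expectation hands the client an unresolved future, a connection counts as open until its future completes, and the test picks the outcome (a response body or an exception) whenever it likes. A self-contained sketch of the pattern, using only Guava; the class name is illustrative:

import com.google.common.util.concurrent.SettableFuture;
import java.io.ByteArrayInputStream;
import java.io.InputStream;

public class SettableFutureSketch {
  public static void main(String[] args) {
    SettableFuture<InputStream> response = SettableFuture.create();
    // Hand the unresolved future to the code under test; the "request" is now in flight.
    System.out.println(response.isDone()); // false
    // The test decides when (and how) the call completes: with a body ...
    response.set(new ByteArrayInputStream("[]".getBytes()));
    System.out.println(response.isDone()); // true
    // ... or, on a different future, with an error:
    // failing.setException(new ReadTimeoutException());
  }
}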
Use of com.metamx.http.client.Request in project druid by druid-io.
From the class KafkaIndexTaskClientTest, method testGetStartTime:
@Test
public void testGetStartTime() throws Exception {
  client = new TestableKafkaIndexTaskClient(httpClient, objectMapper, taskInfoProvider, 2);
  DateTime now = DateTime.now();
  Capture<Request> captured = Capture.newInstance();
  expect(responseHolder.getStatus())
      .andReturn(HttpResponseStatus.NOT_FOUND).times(3)
      .andReturn(HttpResponseStatus.OK);
  expect(responseHolder.getResponse()).andReturn(response);
  expect(response.headers()).andReturn(headers);
  expect(headers.get("X-Druid-Task-Id")).andReturn(null);
  expect(responseHolder.getContent()).andReturn(String.valueOf(now.getMillis())).anyTimes();
  expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT)))
      .andReturn(Futures.immediateFuture(responseHolder))
      .times(2);
  replayAll();
  DateTime results = client.getStartTime(TEST_ID);
  verifyAll();
  Request request = captured.getValue();
  Assert.assertEquals(HttpMethod.GET, request.getMethod());
  Assert.assertEquals(new URL("http://test-host:1234/druid/worker/v1/chat/test-id/time/start"), request.getUrl());
  Assert.assertTrue(request.getHeaders().get("X-Druid-Task-Id").contains("test-id"));
  Assert.assertEquals(now, results);
}
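httpClient.go is expected twice and the status mock yields NOT_FOUND before OK because the client, built here with two retries, re-submits the request while the task endpoint is not yet reachable. A hedged sketch of such a retry loop, with invented names, not the actual KafkaIndexTaskClient implementation:

import com.google.common.base.Charsets;
import com.metamx.http.client.HttpClient;
import com.metamx.http.client.Request;
import com.metamx.http.client.response.FullResponseHandler;
import com.metamx.http.client.response.FullResponseHolder;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.joda.time.Duration;

public class RetrySketch {
  // Hypothetical helper: retry while the endpoint 404s, up to maxRetries times.
  static FullResponseHolder goWithRetries(HttpClient httpClient, Request request, Duration timeout, int maxRetries) throws Exception {
    for (int attempt = 0; ; attempt++) {
      FullResponseHolder holder =
          httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), timeout).get();
      if (!HttpResponseStatus.NOT_FOUND.equals(holder.getStatus()) || attempt >= maxRetries) {
        return holder; // success, a non-retriable status, or retries exhausted
      }
      Thread.sleep(1000); // back off before asking again
    }
  }
}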
Use of com.metamx.http.client.Request in project druid by druid-io.
From the class KafkaIndexTaskClientTest, method testSetEndOffsetsAsyncWithResume:
@Test
public void testSetEndOffsetsAsyncWithResume() throws Exception {
  final Map<Integer, Long> endOffsets = ImmutableMap.of(0, 15L, 1, 120L);
  final int numRequests = TEST_IDS.size();
  Capture<Request> captured = Capture.newInstance(CaptureType.ALL);
  expect(responseHolder.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
  expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT)))
      .andReturn(Futures.immediateFuture(responseHolder))
      .times(numRequests);
  replayAll();
  List<URL> expectedUrls = Lists.newArrayList();
  List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
  for (int i = 0; i < numRequests; i++) {
    expectedUrls.add(new URL(String.format(URL_FORMATTER, TEST_HOST, TEST_PORT, TEST_IDS.get(i), "offsets/end?resume=true")));
    futures.add(client.setEndOffsetsAsync(TEST_IDS.get(i), endOffsets, true));
  }
  List<Boolean> responses = Futures.allAsList(futures).get();
  verifyAll();
  List<Request> requests = captured.getValues();
  Assert.assertEquals(numRequests, requests.size());
  Assert.assertEquals(numRequests, responses.size());
  for (int i = 0; i < numRequests; i++) {
    Assert.assertEquals(HttpMethod.POST, requests.get(i).getMethod());
    Assert.assertTrue("unexpectedURL", expectedUrls.contains(requests.get(i).getUrl()));
    Assert.assertTrue(responses.get(i));
  }
}
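Because every task in TEST_IDS produces its own request through the same mocked httpClient, the test uses CaptureType.ALL so one Capture accumulates all of them for inspection via getValues(). A minimal EasyMock sketch of that mechanism, with an invented interface:

import java.util.List;
import org.easymock.Capture;
import org.easymock.CaptureType;
import org.easymock.EasyMock;

public class CaptureAllSketch {
  interface Sender {
    void send(String message);
  }

  public static void main(String[] args) {
    Sender sender = EasyMock.createMock(Sender.class);
    Capture<String> captured = Capture.newInstance(CaptureType.ALL);
    sender.send(EasyMock.capture(captured));
    EasyMock.expectLastCall().times(2);
    EasyMock.replay(sender);
    sender.send("first");
    sender.send("second");
    List<String> values = captured.getValues(); // [first, second]
    System.out.println(values);
    EasyMock.verify(sender);
  }
}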
Use of com.metamx.http.client.Request in project druid by druid-io.
From the class KafkaIndexTaskClientTest, method testGetEndOffsetsAsync:
@Test
public void testGetEndOffsetsAsync() throws Exception {
  final int numRequests = TEST_IDS.size();
  Capture<Request> captured = Capture.newInstance(CaptureType.ALL);
  expect(responseHolder.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
  expect(responseHolder.getContent()).andReturn("{\"0\":\"1\"}").anyTimes();
  expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT)))
      .andReturn(Futures.immediateFuture(responseHolder))
      .times(numRequests);
  replayAll();
  List<URL> expectedUrls = Lists.newArrayList();
  List<ListenableFuture<Map<Integer, Long>>> futures = Lists.newArrayList();
  for (int i = 0; i < numRequests; i++) {
    expectedUrls.add(new URL(String.format(URL_FORMATTER, TEST_HOST, TEST_PORT, TEST_IDS.get(i), "offsets/end")));
    futures.add(client.getEndOffsetsAsync(TEST_IDS.get(i)));
  }
  List<Map<Integer, Long>> responses = Futures.allAsList(futures).get();
  verifyAll();
  List<Request> requests = captured.getValues();
  Assert.assertEquals(numRequests, requests.size());
  Assert.assertEquals(numRequests, responses.size());
  for (int i = 0; i < numRequests; i++) {
    Assert.assertEquals(HttpMethod.GET, requests.get(i).getMethod());
    Assert.assertTrue("unexpectedURL", expectedUrls.contains(requests.get(i).getUrl()));
    Assert.assertEquals(Maps.newLinkedHashMap(ImmutableMap.of(0, 1L)), responses.get(i));
  }
}
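The final assertion works because Jackson coerces the JSON object {"0":"1"}, whose key and value are both strings, into a typed Map<Integer, Long>. A minimal sketch with a plain ObjectMapper; the class name is illustrative:

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;

public class OffsetsSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Keys are parsed as Integer; the quoted "1" is coerced to Long.
    Map<Integer, Long> offsets = mapper.readValue(
        "{\"0\":\"1\"}",
        new TypeReference<Map<Integer, Long>>() {}
    );
    System.out.println(offsets); // {0=1}
  }
}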