Use of com.metamx.http.client.HttpClient in project druid by druid-io.
The class RemoteTaskRunnerFactoryTest, method testExecNotSharedBetweenRunners.
@Test
public void testExecNotSharedBetweenRunners() {
    final AtomicInteger executorCount = new AtomicInteger(0);
    RemoteTaskRunnerConfig config = new RemoteTaskRunnerConfig();
    IndexerZkConfig indexerZkConfig = new IndexerZkConfig(
        new ZkPathsConfig() {
            @Override
            public String getBase() {
                return basePath;
            }
        },
        null, null, null, null, null
    );
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    Supplier<WorkerBehaviorConfig> workerBehaviorConfig = EasyMock.createMock(Supplier.class);
    // Count every executor the factory asks for, so the test can verify that
    // each runner gets its own executor rather than sharing one.
    ScheduledExecutorFactory executorFactory = new ScheduledExecutorFactory() {
        @Override
        public ScheduledExecutorService create(int i, String s) {
            executorCount.incrementAndGet();
            return ScheduledExecutors.fixed(i, s);
        }
    };
    SimpleWorkerResourceManagementConfig resourceManagementConfig = new SimpleWorkerResourceManagementConfig();
    ResourceManagementSchedulerConfig resourceManagementSchedulerConfig = new ResourceManagementSchedulerConfig() {
        @Override
        public boolean isDoAutoscale() {
            return true;
        }
    };
    RemoteTaskRunnerFactory factory = new RemoteTaskRunnerFactory(
        cf,
        config,
        indexerZkConfig,
        jsonMapper,
        httpClient,
        workerBehaviorConfig,
        executorFactory,
        resourceManagementSchedulerConfig,
        new SimpleWorkerResourceManagementStrategy(
            resourceManagementConfig,
            workerBehaviorConfig,
            resourceManagementSchedulerConfig,
            executorFactory
        )
    );
    // Construction creates one executor; each build() should add exactly one more.
    Assert.assertEquals(1, executorCount.get());
    RemoteTaskRunner remoteTaskRunner1 = factory.build();
    Assert.assertEquals(2, executorCount.get());
    RemoteTaskRunner remoteTaskRunner2 = factory.build();
    Assert.assertEquals(3, executorCount.get());
}
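The counting wrapper above is a general way to assert how many executors a component creates. A minimal standalone sketch of the same idea, using only the JDK (the CountingExecutorFactory name is ours, not Druid's):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical wrapper, for illustration only: it delegates executor creation
// to the JDK and counts how many executors have been handed out, which is
// exactly what the anonymous ScheduledExecutorFactory in the test does.
class CountingExecutorFactory {
    private final AtomicInteger created = new AtomicInteger(0);

    ScheduledExecutorService create(int poolSize, String nameFormat) {
        created.incrementAndGet(); // record each creation
        return Executors.newScheduledThreadPool(poolSize);
    }

    int createdSoFar() {
        return created.get();
    }
}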
Use of com.metamx.http.client.HttpClient in project hive by apache.
The class DruidQueryRecordReader, method initialize.
public void initialize(InputSplit split, Configuration conf) throws IOException {
    HiveDruidSplit hiveDruidSplit = (HiveDruidSplit) split;
    // Create query
    query = createQuery(hiveDruidSplit.getDruidQuery());
    // Execute query
    if (LOG.isInfoEnabled()) {
        LOG.info("Retrieving from druid using query:\n " + query);
    }
    final Lifecycle lifecycle = new Lifecycle();
    final int numConnection = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
    final Period readTimeout = new Period(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
    HttpClient client = HttpClientInit.createClient(
        HttpClientConfig.builder()
            .withReadTimeout(readTimeout.toStandardDuration())
            .withNumConnections(numConnection)
            .build(),
        lifecycle
    );
    try {
        lifecycle.start();
    } catch (Exception e) {
        LOG.error("Issues with lifecycle start", e);
    }
    InputStream response;
    try {
        response = DruidStorageHandlerUtils.submitRequest(
            client,
            DruidStorageHandlerUtils.createRequest(hiveDruidSplit.getLocations()[0], query)
        );
    } catch (Exception e) {
        lifecycle.stop();
        throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
    }
    // Retrieve results
    List<R> resultsList;
    try {
        resultsList = createResultsList(response);
    } catch (IOException e) {
        response.close();
        throw e;
    } finally {
        lifecycle.stop();
    }
    if (resultsList == null || resultsList.isEmpty()) {
        return;
    }
    results = resultsList.iterator();
}
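For context, a direct call against the same com.metamx.http.client API might look roughly like the sketch below; DruidStorageHandlerUtils.submitRequest presumably wraps something similar. The URL path and content type are assumptions here, and the exact signatures should be checked against the client version on the classpath:

import java.io.InputStream;
import java.net.URL;

import com.google.common.util.concurrent.ListenableFuture;
import com.metamx.http.client.HttpClient;
import com.metamx.http.client.Request;
import com.metamx.http.client.response.InputStreamResponseHandler;
import org.jboss.netty.handler.codec.http.HttpMethod;

class DruidPostSketch {
    // Sketch: POST a serialized query to a Druid node and stream the response back.
    static InputStream post(HttpClient client, String address, byte[] jsonQuery) throws Exception {
        Request request = new Request(HttpMethod.POST, new URL("http://" + address + "/druid/v2/"))
            .setContent("application/json", jsonQuery);
        ListenableFuture<InputStream> future = client.go(request, new InputStreamResponseHandler());
        return future.get(); // blocks until the response body is available
    }
}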
Use of com.metamx.http.client.HttpClient in project hive by apache.
The class DruidSerDe, method submitMetadataRequest.
/** Submits the segment metadata request and returns the merged result. */
protected SegmentAnalysis submitMetadataRequest(String address, SegmentMetadataQuery query)
        throws SerDeException, IOException {
    final Lifecycle lifecycle = new Lifecycle();
    HttpClient client = HttpClientInit.createClient(
        HttpClientConfig.builder()
            .withNumConnections(numConnection)
            .withReadTimeout(readTimeout.toStandardDuration())
            .build(),
        lifecycle
    );
    InputStream response;
    try {
        lifecycle.start();
        response = DruidStorageHandlerUtils.submitRequest(
            client,
            DruidStorageHandlerUtils.createRequest(address, query)
        );
    } catch (Exception e) {
        throw new SerDeException(StringUtils.stringifyException(e));
    } finally {
        lifecycle.stop();
    }
    // Retrieve results
    List<SegmentAnalysis> resultsList;
    try {
        resultsList = DruidStorageHandlerUtils.SMILE_MAPPER.readValue(
            response,
            new TypeReference<List<SegmentAnalysis>>() {
            }
        );
    } catch (Exception e) {
        response.close();
        throw new SerDeException(StringUtils.stringifyException(e));
    }
    if (resultsList == null || resultsList.isEmpty()) {
        throw new SerDeException("Connected to Druid but could not retrieve datasource information");
    }
    if (resultsList.size() != 1) {
        throw new SerDeException("Information about segments should have been merged");
    }
    return resultsList.get(0);
}
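The anonymous TypeReference is the standard Jackson idiom for deserializing into a generic type; a minimal self-contained demonstration with a plain ObjectMapper (the JSON here is illustrative):

import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

class TypeReferenceDemo {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // The anonymous TypeReference subclass preserves the generic type
        // (List<String>) that would otherwise be erased at runtime.
        List<String> values = mapper.readValue(
            "[\"a\",\"b\",\"c\"]",
            new TypeReference<List<String>>() {
            }
        );
        System.out.println(values); // [a, b, c]
    }
}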
Use of com.metamx.http.client.HttpClient in project druid by druid-io.
The class DirectDruidClientTest, method testQueryInterruptionExceptionLogMessage.
@Test
public void testQueryInterruptionExceptionLogMessage() throws JsonProcessingException {
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    SettableFuture<Object> interruptionFuture = SettableFuture.create();
    Capture<Request> capturedRequest = EasyMock.newCapture();
    String hostName = "localhost:8080";
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(interruptionFuture)
        .anyTimes();
    EasyMock.replay(httpClient);
    DataSegment dataSegment = new DataSegment(
        "test",
        new Interval("2013-01-01/2013-01-02"),
        new DateTime("2013-01-01").toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        NoneShardSpec.instance(),
        0,
        0L
    );
    final ServerSelector serverSelector = new ServerSelector(
        dataSegment,
        new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
    );
    DirectDruidClient client1 = new DirectDruidClient(
        new ReflectionQueryToolChestWarehouse(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER,
        new DefaultObjectMapper(),
        httpClient,
        hostName,
        new NoopServiceEmitter()
    );
    QueryableDruidServer queryableDruidServer = new QueryableDruidServer(
        new DruidServer("test1", hostName, 0, "historical", DruidServer.DEFAULT_TIER, 0),
        client1
    );
    serverSelector.addServerAndUpdateSegment(queryableDruidServer, dataSegment);
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    // Complete the mocked HTTP future with a Druid error payload.
    interruptionFuture.set(new ByteArrayInputStream("{\"error\":\"testing1\",\"errorMessage\":\"testing2\"}".getBytes()));
    Sequence results = client1.run(query, context);
    QueryInterruptedException actualException = null;
    try {
        Sequences.toList(results, Lists.newArrayList());
    } catch (QueryInterruptedException e) {
        actualException = e;
    }
    Assert.assertNotNull(actualException);
    Assert.assertEquals("testing1", actualException.getErrorCode());
    Assert.assertEquals("testing2", actualException.getMessage());
    Assert.assertEquals(hostName, actualException.getHost());
    EasyMock.verify(httpClient);
}
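The test leans on EasyMock's capture/replay/verify cycle; a minimal self-contained illustration of that cycle, independent of Druid (the Sender interface is invented for the demo):

import static org.easymock.EasyMock.*;

import org.easymock.Capture;
import org.junit.Assert;
import org.junit.Test;

public class CaptureDemo {
    interface Sender {
        void send(String message);
    }

    @Test
    public void capturesTheArgument() {
        Sender sender = createMock(Sender.class);
        Capture<String> captured = newCapture();
        sender.send(capture(captured)); // record phase: capture whatever argument is passed
        replay(sender);

        sender.send("hello");

        Assert.assertEquals("hello", captured.getValue());
        verify(sender); // fails if send() was never called
    }
}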
Use of com.metamx.http.client.HttpClient in project druid by druid-io.
The class DirectDruidClientTest, method testRun.
@Test
public void testRun() throws Exception {
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    final URL url = new URL("http://foo/druid/v2/");
    SettableFuture<InputStream> futureResult = SettableFuture.create();
    Capture<Request> capturedRequest = EasyMock.newCapture();
    // First call returns a future we complete normally, the second a future we
    // fail with a timeout, and any further calls return futures that never complete.
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(futureResult)
        .times(1);
    SettableFuture futureException = SettableFuture.create();
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(futureException)
        .times(1);
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(SettableFuture.create())
        .atLeastOnce();
    EasyMock.replay(httpClient);
    final ServerSelector serverSelector = new ServerSelector(
        new DataSegment(
            "test",
            new Interval("2013-01-01/2013-01-02"),
            new DateTime("2013-01-01").toString(),
            Maps.<String, Object>newHashMap(),
            Lists.<String>newArrayList(),
            Lists.<String>newArrayList(),
            NoneShardSpec.instance(),
            0,
            0L
        ),
        new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
    );
    DirectDruidClient client1 = new DirectDruidClient(
        new ReflectionQueryToolChestWarehouse(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER,
        new DefaultObjectMapper(),
        httpClient,
        "foo",
        new NoopServiceEmitter()
    );
    DirectDruidClient client2 = new DirectDruidClient(
        new ReflectionQueryToolChestWarehouse(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER,
        new DefaultObjectMapper(),
        httpClient,
        "foo2",
        new NoopServiceEmitter()
    );
    QueryableDruidServer queryableDruidServer1 = new QueryableDruidServer(
        new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
        client1
    );
    serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
    QueryableDruidServer queryableDruidServer2 = new QueryableDruidServer(
        new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
        client2
    );
    serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment());
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    Sequence s1 = client1.run(query, context);
    Assert.assertTrue(capturedRequest.hasCaptured());
    Assert.assertEquals(url, capturedRequest.getValue().getUrl());
    Assert.assertEquals(HttpMethod.POST, capturedRequest.getValue().getMethod());
    Assert.assertEquals(1, client1.getNumOpenConnections());
    // simulate read timeout
    Sequence s2 = client1.run(query, context);
    Assert.assertEquals(2, client1.getNumOpenConnections());
    futureException.setException(new ReadTimeoutException());
    Assert.assertEquals(1, client1.getNumOpenConnections());
    // subsequent connections should work
    Sequence s3 = client1.run(query, context);
    Sequence s4 = client1.run(query, context);
    Sequence s5 = client1.run(query, context);
    Assert.assertTrue(client1.getNumOpenConnections() == 4);
    // produce result for first connection
    futureResult.set(new ByteArrayInputStream("[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]".getBytes()));
    List<Result> results = Sequences.toList(s1, Lists.<Result>newArrayList());
    Assert.assertEquals(1, results.size());
    Assert.assertEquals(new DateTime("2014-01-01T01:02:03Z"), results.get(0).getTimestamp());
    Assert.assertEquals(3, client1.getNumOpenConnections());
    client2.run(query, context);
    client2.run(query, context);
    Assert.assertTrue(client2.getNumOpenConnections() == 2);
    // client2 has fewer open connections (2 vs. 3), so the connection-count strategy picks it
    Assert.assertTrue(serverSelector.pick() == queryableDruidServer2);
    EasyMock.verify(httpClient);
}
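Both tests drive the mocked client through Guava's SettableFuture, which lets the test decide when and how each "HTTP response" completes; a minimal standalone demonstration (not Druid code):

import com.google.common.util.concurrent.SettableFuture;

class SettableFutureDemo {
    public static void main(String[] args) throws Exception {
        SettableFuture<String> future = SettableFuture.create();
        // Nothing is computing this future; the caller decides when it completes.
        future.set("done");                   // complete normally
        System.out.println(future.get());     // prints "done"

        SettableFuture<String> failing = SettableFuture.create();
        failing.setException(new RuntimeException("simulated timeout"));
        System.out.println(failing.isDone()); // true: completed exceptionally
    }
}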