Use of com.metamx.http.client.HttpClient in project druid by druid-io.
From the class RemoteTaskActionClientTest, method setUp:
@Before
public void setUp() {
    httpClient = createMock(HttpClient.class);
    selector = createMock(ServerDiscoverySelector.class);
    server = new Server() {
        @Override
        public String getScheme() {
            return "http";
        }

        @Override
        public int getPort() {
            return 8080;
        }

        @Override
        public String getHost() {
            return "localhost";
        }

        @Override
        public String getAddress() {
            return "localhost";
        }
    };
    long now = System.currentTimeMillis();
    result = Arrays.asList(new TaskLock("groupId", "dataSource", new Interval(now - 30 * 1000, now), "version"));
}
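In the test methods that follow this setUp, the mocked HttpClient is typically programmed with an expected go() call and then replayed. A minimal sketch of such an expectation, assuming the go(Request, HttpResponseHandler) signature used in the other examples on this page; the "{}" response body is a placeholder, not taken from the real test:

// Hypothetical expectation for a test method (not part of setUp above).
EasyMock.expect(
        httpClient.go(EasyMock.anyObject(Request.class), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(Futures.immediateFuture(new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("{}"))))
        .once();
EasyMock.replay(httpClient);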
Use of com.metamx.http.client.HttpClient in project druid by druid-io.
From the class RemoteTaskRunnerFactoryTest, method testExecNotSharedBetweenRunners:
@Test
public void testExecNotSharedBetweenRunners() {
    final AtomicInteger executorCount = new AtomicInteger(0);
    RemoteTaskRunnerConfig config = new RemoteTaskRunnerConfig();
    IndexerZkConfig indexerZkConfig = new IndexerZkConfig(new ZkPathsConfig() {
        @Override
        public String getBase() {
            return basePath;
        }
    }, null, null, null, null, null);
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    Supplier<WorkerBehaviorConfig> workerBehaviorConfig = EasyMock.createMock(Supplier.class);
    ScheduledExecutorFactory executorFactory = new ScheduledExecutorFactory() {
        @Override
        public ScheduledExecutorService create(int i, String s) {
            executorCount.incrementAndGet();
            return ScheduledExecutors.fixed(i, s);
        }
    };
    SimpleWorkerResourceManagementConfig resourceManagementConfig = new SimpleWorkerResourceManagementConfig();
    ResourceManagementSchedulerConfig resourceManagementSchedulerConfig = new ResourceManagementSchedulerConfig() {
        @Override
        public boolean isDoAutoscale() {
            return true;
        }
    };
    RemoteTaskRunnerFactory factory = new RemoteTaskRunnerFactory(
        cf,
        config,
        indexerZkConfig,
        jsonMapper,
        httpClient,
        workerBehaviorConfig,
        executorFactory,
        resourceManagementSchedulerConfig,
        new SimpleWorkerResourceManagementStrategy(
            resourceManagementConfig,
            workerBehaviorConfig,
            resourceManagementSchedulerConfig,
            executorFactory
        )
    );
    Assert.assertEquals(1, executorCount.get());
    RemoteTaskRunner remoteTaskRunner1 = factory.build();
    Assert.assertEquals(2, executorCount.get());
    RemoteTaskRunner remoteTaskRunner2 = factory.build();
    Assert.assertEquals(3, executorCount.get());
}
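The method above relies on fixture fields (cf, jsonMapper, basePath) that are set up elsewhere in RemoteTaskRunnerFactoryTest. A rough sketch of what such a fixture could look like, assuming Curator's TestingServer for an in-process ZooKeeper; the real test's setup may differ:

// Hypothetical fixture sketch only; the field names mirror the method body above.
private TestingServer testingServer;
private CuratorFramework cf;
private ObjectMapper jsonMapper = new DefaultObjectMapper();  // Druid's mapper, as used in DirectDruidClientTest
private String basePath = "/test/druid";

@Before
public void setUp() throws Exception {
    testingServer = new TestingServer();
    cf = CuratorFrameworkFactory.newClient(testingServer.getConnectString(), new RetryOneTime(100));
    cf.start();
}

@After
public void tearDown() throws Exception {
    cf.close();
    testingServer.close();
}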
Use of com.metamx.http.client.HttpClient in project druid by druid-io.
From the class DirectDruidClientTest, method testCancel:
@Test
public void testCancel() throws Exception {
    HttpClient httpClient = EasyMock.createStrictMock(HttpClient.class);
    Capture<Request> capturedRequest = EasyMock.newCapture();
    ListenableFuture<Object> cancelledFuture = Futures.immediateCancelledFuture();
    SettableFuture<Object> cancellationFuture = SettableFuture.create();
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject())).andReturn(cancelledFuture).once();
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject())).andReturn(cancellationFuture).once();
    EasyMock.replay(httpClient);
    final ServerSelector serverSelector = new ServerSelector(
        new DataSegment("test", new Interval("2013-01-01/2013-01-02"), new DateTime("2013-01-01").toString(),
            Maps.<String, Object>newHashMap(), Lists.<String>newArrayList(), Lists.<String>newArrayList(),
            NoneShardSpec.instance(), 0, 0L),
        new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
    );
    DirectDruidClient client1 = new DirectDruidClient(
        new ReflectionQueryToolChestWarehouse(), QueryRunnerTestHelper.NOOP_QUERYWATCHER,
        new DefaultObjectMapper(), httpClient, "foo", new NoopServiceEmitter()
    );
    QueryableDruidServer queryableDruidServer1 = new QueryableDruidServer(
        new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0), client1
    );
    serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    cancellationFuture.set(new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("cancelled")));
    Sequence results = client1.run(query, context);
    Assert.assertEquals(HttpMethod.DELETE, capturedRequest.getValue().getMethod());
    Assert.assertEquals(0, client1.getNumOpenConnections());
    QueryInterruptedException exception = null;
    try {
        Sequences.toList(results, Lists.newArrayList());
    } catch (QueryInterruptedException e) {
        exception = e;
    }
    Assert.assertNotNull(exception);
    EasyMock.verify(httpClient);
}
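For context, the DELETE captured by the test is the cancellation request DirectDruidClient sends for a running query. Issuing a similar request directly against a real (non-mock) HttpClient might look like the sketch below; the realClient variable, the /druid/v2/<queryId> path and the query id are assumptions, not taken from the test:

// Hypothetical stand-alone cancel request; it reuses only the Request,
// StatusResponseHandler and StatusResponseHolder types already shown on this page.
String queryId = "some-query-id";  // placeholder
ListenableFuture<StatusResponseHolder> cancel = realClient.go(
    new Request(HttpMethod.DELETE, new URL("http://localhost:8080/druid/v2/" + queryId)),
    new StatusResponseHandler(Charset.defaultCharset()));
StatusResponseHolder holder = cancel.get();  // blocks; throws InterruptedException/ExecutionException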
Use of com.metamx.http.client.HttpClient in project druid by druid-io.
From the class JettyQosTest, method testQoS:
@Test(timeout = 60_000L)
public void testQoS() throws Exception {
    final int fastThreads = 20;
    final int slowThreads = 15;
    final int slowRequestsPerThread = 5;
    final int fastRequestsPerThread = 200;
    final HttpClient fastClient = new ClientHolder(fastThreads).getClient();
    final HttpClient slowClient = new ClientHolder(slowThreads).getClient();
    final ExecutorService fastPool = Execs.multiThreaded(fastThreads, "fast-%d");
    final ExecutorService slowPool = Execs.multiThreaded(slowThreads, "slow-%d");
    final CountDownLatch latch = new CountDownLatch(fastThreads * fastRequestsPerThread);
    final AtomicLong fastCount = new AtomicLong();
    final AtomicLong slowCount = new AtomicLong();
    final AtomicLong fastElapsed = new AtomicLong();
    final AtomicLong slowElapsed = new AtomicLong();
    for (int i = 0; i < slowThreads; i++) {
        slowPool.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < slowRequestsPerThread; i++) {
                    long startTime = System.currentTimeMillis();
                    try {
                        ListenableFuture<StatusResponseHolder> go = slowClient.go(
                            new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/slow/hello")),
                            new StatusResponseHandler(Charset.defaultCharset()));
                        go.get();
                        slowCount.incrementAndGet();
                        slowElapsed.addAndGet(System.currentTimeMillis() - startTime);
                    } catch (InterruptedException e) {
                        // BE COOL
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw Throwables.propagate(e);
                    }
                }
            }
        });
    }
    // wait for jetty server pool to completely fill up
    while (server.getThreadPool().getIdleThreads() != 0) {
        Thread.sleep(25);
    }
    for (int i = 0; i < fastThreads; i++) {
        fastPool.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < fastRequestsPerThread; i++) {
                    long startTime = System.currentTimeMillis();
                    try {
                        ListenableFuture<StatusResponseHolder> go = fastClient.go(
                            new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/default")),
                            new StatusResponseHandler(Charset.defaultCharset()));
                        go.get();
                        fastCount.incrementAndGet();
                        fastElapsed.addAndGet(System.currentTimeMillis() - startTime);
                        latch.countDown();
                    } catch (InterruptedException e) {
                        // BE COOL
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw Throwables.propagate(e);
                    }
                }
            }
        });
    }
    // Wait for all fast requests to be served
    latch.await();
    slowPool.shutdownNow();
    fastPool.shutdown();
    // check that fast requests finished quickly
    Assert.assertTrue(fastElapsed.get() / fastCount.get() < 500);
}
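ClientHolder is a test helper whose source is not shown here. Based on the HttpClientInit/HttpClientConfig calls in the Hive example below, a holder along these lines is plausible; treat it as a sketch rather than the actual Druid helper:

// Hypothetical ClientHolder-style helper; the real class in the Druid test tree may differ.
class ClientHolderSketch {
    private final Lifecycle lifecycle = new Lifecycle();
    private final HttpClient client;

    ClientHolderSketch(int numConnections) throws Exception {
        client = HttpClientInit.createClient(
            HttpClientConfig.builder().withNumConnections(numConnections).build(),
            lifecycle);
        // start() is declared to throw Exception, as in the Hive example below
        lifecycle.start();
    }

    HttpClient getClient() {
        return client;
    }

    void close() {
        lifecycle.stop();
    }
}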
Use of com.metamx.http.client.HttpClient in project hive by apache.
From the class DruidQueryBasedInputFormat, method splitSelectQuery:
/**
 * Splits the Select query according to the configured threshold so that the read can be
 * parallelized. Only the Druid broker is contacted to obtain the results.
 */
private static HiveDruidSplit[] splitSelectQuery(Configuration conf, String address, SelectQuery query, Path dummyPath) throws IOException {
    final int selectThreshold = (int) HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_DRUID_SELECT_THRESHOLD);
    final int numConnection = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
    final Period readTimeout = new Period(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
    final boolean isFetch = query.getContextBoolean(Constants.DRUID_QUERY_FETCH, false);
    if (isFetch) {
        // If it has a limit, we use it and we do not split the query
        return new HiveDruidSplit[] { new HiveDruidSplit(DruidStorageHandlerUtils.JSON_MAPPER.writeValueAsString(query), dummyPath, new String[] { address }) };
    }
    // We do not have the number of rows, thus we need to execute a
    // Segment Metadata query to obtain number of rows
    SegmentMetadataQueryBuilder metadataBuilder = new Druids.SegmentMetadataQueryBuilder();
    metadataBuilder.dataSource(query.getDataSource());
    metadataBuilder.intervals(query.getIntervals());
    metadataBuilder.merge(true);
    metadataBuilder.analysisTypes();
    SegmentMetadataQuery metadataQuery = metadataBuilder.build();
    Lifecycle lifecycle = new Lifecycle();
    HttpClient client = HttpClientInit.createClient(HttpClientConfig.builder().withNumConnections(numConnection).withReadTimeout(readTimeout.toStandardDuration()).build(), lifecycle);
    try {
        lifecycle.start();
    } catch (Exception e) {
        LOG.error("Lifecycle start issue");
        throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
    }
    InputStream response;
    try {
        response = DruidStorageHandlerUtils.submitRequest(client, DruidStorageHandlerUtils.createRequest(address, metadataQuery));
    } catch (Exception e) {
        lifecycle.stop();
        throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
    }
    // Retrieve results
    List<SegmentAnalysis> metadataList;
    try {
        metadataList = DruidStorageHandlerUtils.SMILE_MAPPER.readValue(response, new TypeReference<List<SegmentAnalysis>>() {
        });
    } catch (Exception e) {
        response.close();
        throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
    } finally {
        lifecycle.stop();
    }
    if (metadataList == null) {
        throw new IOException("Connected to Druid but could not retrieve datasource information");
    }
    if (metadataList.isEmpty()) {
        // There are no rows for that time range, we can submit query as it is
        return new HiveDruidSplit[] { new HiveDruidSplit(DruidStorageHandlerUtils.JSON_MAPPER.writeValueAsString(query), dummyPath, new String[] { address }) };
    }
    if (metadataList.size() != 1) {
        throw new IOException("Information about segments should have been merged");
    }
    final long numRows = metadataList.get(0).getNumRows();
    query = query.withPagingSpec(PagingSpec.newSpec(Integer.MAX_VALUE));
    if (numRows <= selectThreshold) {
        // We are not going to split it
        return new HiveDruidSplit[] { new HiveDruidSplit(DruidStorageHandlerUtils.JSON_MAPPER.writeValueAsString(query), dummyPath, new String[] { address }) };
    }
    // If the query does not specify a timestamp, we obtain the total time using
    // a Time Boundary query. Then, we use the information to split the query
    // following the Select threshold configuration property
    final List<Interval> intervals = new ArrayList<>();
    if (query.getIntervals().size() == 1 && query.getIntervals().get(0).withChronology(ISOChronology.getInstanceUTC()).equals(DruidTable.DEFAULT_INTERVAL)) {
        // Default max and min, we should execute a time boundary query to get a
        // more precise range
        TimeBoundaryQueryBuilder timeBuilder = new Druids.TimeBoundaryQueryBuilder();
        timeBuilder.dataSource(query.getDataSource());
        TimeBoundaryQuery timeQuery = timeBuilder.build();
        lifecycle = new Lifecycle();
        client = HttpClientInit.createClient(HttpClientConfig.builder().withNumConnections(numConnection).withReadTimeout(readTimeout.toStandardDuration()).build(), lifecycle);
        try {
            lifecycle.start();
        } catch (Exception e) {
            LOG.error("Lifecycle start issue");
            throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
        }
        try {
            response = DruidStorageHandlerUtils.submitRequest(client, DruidStorageHandlerUtils.createRequest(address, timeQuery));
        } catch (Exception e) {
            lifecycle.stop();
            throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
        }
        // Retrieve results
        List<Result<TimeBoundaryResultValue>> timeList;
        try {
            timeList = DruidStorageHandlerUtils.SMILE_MAPPER.readValue(response, new TypeReference<List<Result<TimeBoundaryResultValue>>>() {
            });
        } catch (Exception e) {
            response.close();
            throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
        } finally {
            lifecycle.stop();
        }
        if (timeList == null || timeList.isEmpty()) {
            throw new IOException("Connected to Druid but could not retrieve time boundary information");
        }
        if (timeList.size() != 1) {
            throw new IOException("We should obtain a single time boundary");
        }
        intervals.add(new Interval(timeList.get(0).getValue().getMinTime().getMillis(), timeList.get(0).getValue().getMaxTime().getMillis(), ISOChronology.getInstanceUTC()));
    } else {
        intervals.addAll(query.getIntervals());
    }
    // Create (numRows/default threshold) input splits
    int numSplits = (int) Math.ceil((double) numRows / selectThreshold);
    List<List<Interval>> newIntervals = createSplitsIntervals(intervals, numSplits);
    HiveDruidSplit[] splits = new HiveDruidSplit[numSplits];
    for (int i = 0; i < numSplits; i++) {
        // Create partial Select query
        final SelectQuery partialQuery = query.withQuerySegmentSpec(new MultipleIntervalSegmentSpec(newIntervals.get(i)));
        splits[i] = new HiveDruidSplit(DruidStorageHandlerUtils.JSON_MAPPER.writeValueAsString(partialQuery), dummyPath, new String[] { address });
    }
    return splits;
}
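The split count at the end of the method is plain ceiling division of the row count by the Select threshold; a small worked example with made-up numbers:

// Illustrative arithmetic only; the values are invented.
long numRows = 250_000L;           // reported by the Segment Metadata query
int selectThreshold = 100_000;     // HiveConf HIVE_DRUID_SELECT_THRESHOLD
int numSplits = (int) Math.ceil((double) numRows / selectThreshold);  // 3
// createSplitsIntervals(...) then partitions the covered time range into 3 sub-ranges,
// and each HiveDruidSplit carries a Select query restricted to one of them.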