Use of java.util.concurrent.atomic.AtomicLong in project druid by druid-io.
The class DirectDruidClient, method run.
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
  QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  boolean isBySegment = BaseQuery.getContextBySegment(query, false);
  Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
  if (types == null) {
    final TypeFactory typeFactory = objectMapper.getTypeFactory();
    JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
    JavaType bySegmentType = typeFactory.constructParametricType(
        Result.class,
        typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType)
    );
    types = Pair.of(baseType, bySegmentType);
    typesMap.put(query.getClass(), types);
  }
  final JavaType typeRef;
  if (isBySegment) {
    typeRef = types.rhs;
  } else {
    typeRef = types.lhs;
  }
  final ListenableFuture<InputStream> future;
  final String url = String.format("http://%s/druid/v2/", host);
  final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
  try {
    log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
    final long requestStartTime = System.currentTimeMillis();
    final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
    builder.setDimension("server", host);
    final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
      private long responseStartTime;
      private final AtomicLong byteCount = new AtomicLong(0);
      private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
      private final AtomicBoolean done = new AtomicBoolean(false);

      @Override
      public ClientResponse<InputStream> handleResponse(HttpResponse response) {
        log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
        responseStartTime = System.currentTimeMillis();
        emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
        try {
          final String responseContext = response.headers().get("X-Druid-Response-Context");
          // context may be null in case of error or query timeout
          if (responseContext != null) {
            context.putAll(
                objectMapper.<Map<String, Object>>readValue(
                    responseContext,
                    new TypeReference<Map<String, Object>>() {}
                )
            );
          }
          queue.put(new ChannelBufferInputStream(response.getContent()));
        } catch (final IOException e) {
          log.error(e, "Error parsing response context from url [%s]", url);
          return ClientResponse.<InputStream>finished(
              new InputStream() {
                @Override
                public int read() throws IOException {
                  throw e;
                }
              }
          );
        } catch (InterruptedException e) {
          log.error(e, "Queue appending interrupted");
          Thread.currentThread().interrupt();
          throw Throwables.propagate(e);
        }
        byteCount.addAndGet(response.getContent().readableBytes());
        return ClientResponse.<InputStream>finished(
            new SequenceInputStream(
                new Enumeration<InputStream>() {
                  @Override
                  public boolean hasMoreElements() {
                    // done is set to true only after the last stream has been put in the queue;
                    // until then, the enumeration keeps handing out InputStreams from the queue.
                    synchronized (done) {
                      return !done.get() || !queue.isEmpty();
                    }
                  }

                  @Override
                  public InputStream nextElement() {
                    try {
                      return queue.take();
                    } catch (InterruptedException e) {
                      Thread.currentThread().interrupt();
                      throw Throwables.propagate(e);
                    }
                  }
                }
            )
        );
      }

      @Override
      public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
        final ChannelBuffer channelBuffer = chunk.getContent();
        final int bytes = channelBuffer.readableBytes();
        if (bytes > 0) {
          try {
            queue.put(new ChannelBufferInputStream(channelBuffer));
          } catch (InterruptedException e) {
            log.error(e, "Unable to put chunk input stream into Sequence queue for url [%s]", url);
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
          }
          byteCount.addAndGet(bytes);
        }
        return clientResponse;
      }

      @Override
      public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
        long stopTime = System.currentTimeMillis();
        log.debug(
            "Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
            query.getId(),
            url,
            byteCount.get(),
            stopTime - responseStartTime,
            byteCount.get() / (0.001 * (stopTime - responseStartTime))
        );
        emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
        emitter.emit(builder.build("query/node/bytes", byteCount.get()));
        synchronized (done) {
          try {
            // An empty stream is put at the end so that SequenceInputStream has something to close out
            // after done is set to true, regardless of the rest of the stream's state.
            queue.put(ByteSource.empty().openStream());
          } catch (InterruptedException e) {
            log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
          } catch (IOException e) {
            // opening a stream on an empty ByteSource should never throw
            throw Throwables.propagate(e);
          } finally {
            done.set(true);
          }
        }
        return ClientResponse.<InputStream>finished(clientResponse.getObj());
      }

      @Override
      public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
        // Don't wait for lock in case the lock had something to do with the error
        synchronized (done) {
          done.set(true);
          // Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
          // If nothing is waiting on take(), this will be closed out anyways.
          queue.offer(
              new InputStream() {
                @Override
                public int read() throws IOException {
                  throw new IOException(e);
                }
              }
          );
        }
      }
    };

    future = httpClient.go(
        new Request(HttpMethod.POST, new URL(url))
            .setContent(objectMapper.writeValueAsBytes(query))
            .setHeader(
                HttpHeaders.Names.CONTENT_TYPE,
                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON
            ),
        responseHandler
    );
    queryWatcher.registerQuery(query, future);
    openConnections.getAndIncrement();
    Futures.addCallback(
        future,
        new FutureCallback<InputStream>() {
          @Override
          public void onSuccess(InputStream result) {
            openConnections.getAndDecrement();
          }

          @Override
          public void onFailure(Throwable t) {
            openConnections.getAndDecrement();
            if (future.isCancelled()) {
              // forward the cancellation to the underlying queriable node
              try {
                StatusResponseHolder res = httpClient.go(
                    new Request(HttpMethod.DELETE, new URL(cancelUrl))
                        .setContent(objectMapper.writeValueAsBytes(query))
                        .setHeader(
                            HttpHeaders.Names.CONTENT_TYPE,
                            isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON
                        ),
                    new StatusResponseHandler(Charsets.UTF_8)
                ).get();
                if (res.getStatus().getCode() >= 500) {
                  throw new RE(
                      "Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                      query.getId(),
                      res.getStatus().getCode(),
                      res.getStatus().getReasonPhrase()
                  );
                }
              } catch (IOException | ExecutionException | InterruptedException e) {
                throw Throwables.propagate(e);
              }
            }
          }
        }
    );
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }

  Sequence<T> retVal = new BaseSequence<>(
      new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
        @Override
        public JsonParserIterator<T> make() {
          return new JsonParserIterator<T>(typeRef, future, url);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
          CloseQuietly.close(iterFromMake);
        }
      }
  );
  // avoid the cost of de-serializing and then re-serializing again when adding to cache
  if (!isBySegment) {
    retVal = Sequences.map(
        retVal,
        toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())
    );
  }
  return retVal;
}
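
The AtomicLong byteCount above is the relevant pattern here: handleResponse and handleChunk can fire on Netty I/O threads while done() reads the total, so bytes are accumulated lock-free with addAndGet. A minimal self-contained sketch of the same idiom, with purely illustrative names (nothing below is Druid API):

import java.util.concurrent.atomic.AtomicLong;

public class ByteCounterDemo {
    private final AtomicLong byteCount = new AtomicLong(0);

    // called concurrently, e.g. once per received chunk
    public void onChunk(byte[] chunk) {
        byteCount.addAndGet(chunk.length);
    }

    // called once the response is complete
    public long total() {
        return byteCount.get();
    }

    public static void main(String[] args) throws InterruptedException {
        final ByteCounterDemo demo = new ByteCounterDemo();
        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int j = 0; j < 1000; j++) {
                        demo.onChunk(new byte[128]);
                    }
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        System.out.println(demo.total()); // always 4 * 1000 * 128 = 512000
    }
}

Because addAndGet is atomic, the final total is exact no matter how the chunk callbacks interleave.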
Use of java.util.concurrent.atomic.AtomicLong in project druid by druid-io.
The class ServerManager, method getQueryRunnerForSegments.
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs) {
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  if (factory == null) {
    log.makeAlert("Unknown query type, [%s]", query.getClass())
       .addData("dataSource", query.getDataSource())
       .emit();
    return new NoopQueryRunner<T>();
  }
  final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
  String dataSourceName = getDataSourceName(query.getDataSource());
  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);
  if (timeline == null) {
    return new NoopQueryRunner<T>();
  }
  final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = getBuilderFn(toolChest);
  final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
  FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable
      .create(specs)
      .transformCat(
          new Function<SegmentDescriptor, Iterable<QueryRunner<T>>>() {
            @Override
            @SuppressWarnings("unchecked")
            public Iterable<QueryRunner<T>> apply(SegmentDescriptor input) {
              final PartitionHolder<ReferenceCountingSegment> entry =
                  timeline.findEntry(input.getInterval(), input.getVersion());
              if (entry == null) {
                return Arrays.<QueryRunner<T>>asList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
              }
              final PartitionChunk<ReferenceCountingSegment> chunk = entry.getChunk(input.getPartitionNumber());
              if (chunk == null) {
                return Arrays.<QueryRunner<T>>asList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
              }
              final ReferenceCountingSegment adapter = chunk.getObject();
              return Arrays.asList(
                  buildAndDecorateQueryRunner(factory, toolChest, adapter, input, builderFn, cpuTimeAccumulator)
              );
            }
          }
      );
  return CPUTimeMetricQueryRunner.safeBuild(
      new FinalizeResultsQueryRunner<>(
          toolChest.mergeResults(factory.mergeRunners(exec, queryRunners)),
          toolChest
      ),
      builderFn,
      emitter,
      cpuTimeAccumulator,
      true
  );
}
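
The AtomicLong cpuTimeAccumulator is created once per query and threaded through every per-segment runner, so CPUTimeMetricQueryRunner.safeBuild can report one aggregate CPU-time figure at the end. A rough sketch of that accumulator pattern, assuming a hypothetical Task interface and using System.nanoTime() where Druid measures actual per-thread CPU time:

import java.util.concurrent.atomic.AtomicLong;

public class CpuTimeAccumulatorDemo {
    // hypothetical stand-in for the work a per-segment runner does
    interface Task {
        void run();
    }

    // wraps a task so its measured cost is added to a shared accumulator,
    // mirroring how one AtomicLong is threaded through all runners
    static Task accumulating(final Task delegate, final AtomicLong accumulator) {
        return new Task() {
            @Override
            public void run() {
                long start = System.nanoTime();
                try {
                    delegate.run();
                } finally {
                    accumulator.addAndGet(System.nanoTime() - start);
                }
            }
        };
    }

    public static void main(String[] args) {
        AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
        for (int i = 0; i < 3; i++) {
            accumulating(new Task() {
                @Override
                public void run() {
                    // simulate per-segment work
                }
            }, cpuTimeAccumulator).run();
        }
        // one metric for the whole "query", summed across all wrapped tasks
        System.out.println("total ns: " + cpuTimeAccumulator.get());
    }
}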
Use of java.util.concurrent.atomic.AtomicLong in project druid by druid-io.
The class CacheDistributionTest, method testDistribution.
// run to get a sense of cache key distribution for different ketama reps / hash functions
@Test
public void testDistribution() throws Exception {
  KetamaNodeLocator locator = new KetamaNodeLocator(
      ImmutableList.of(
          dummyNode("druid-cache.0001", 11211),
          dummyNode("druid-cache.0002", 11211),
          dummyNode("druid-cache.0003", 11211),
          dummyNode("druid-cache.0004", 11211),
          dummyNode("druid-cache.0005", 11211)
      ),
      hash,
      new DefaultKetamaNodeLocatorConfiguration() {
        @Override
        public int getNodeRepetitions() {
          return reps;
        }
      }
  );
  Map<MemcachedNode, AtomicLong> counter = Maps.newHashMap();
  long t = 0;
  for (int i = 0; i < KEY_COUNT; ++i) {
    final String k = DigestUtils.sha1Hex("abc" + i) + ":" + DigestUtils.sha1Hex("xyz" + i);
    long t0 = System.nanoTime();
    MemcachedNode node = locator.getPrimary(k);
    t += System.nanoTime() - t0;
    if (counter.containsKey(node)) {
      counter.get(node).incrementAndGet();
    } else {
      counter.put(node, new AtomicLong(1));
    }
  }
  long min = Long.MAX_VALUE;
  long max = 0;
  System.out.printf("%25s\t%5d\t", hash, reps);
  for (AtomicLong count : counter.values()) {
    System.out.printf("%10d\t", count.get());
    min = Math.min(min, count.get());
    max = Math.max(max, count.get());
  }
  System.out.printf("%7.2f\t%5.0f\n", (double) min / (double) max, (double) t / KEY_COUNT);
}
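
The histogram here is a Map<MemcachedNode, AtomicLong>; since the test is single-threaded, the containsKey/put sequence is safe. If the same counter were updated concurrently, the usual Java 8 form is ConcurrentHashMap.computeIfAbsent, sketched below with illustrative names:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class HistogramDemo {
    public static void main(String[] args) {
        // ConcurrentHashMap + computeIfAbsent makes the counter safe under concurrent writers:
        // at most one AtomicLong is ever created per key, and increments are atomic
        Map<String, AtomicLong> counter = new ConcurrentHashMap<>();
        String[] nodes = {"a", "b", "a", "c", "a", "b"};
        for (String node : nodes) {
            counter.computeIfAbsent(node, k -> new AtomicLong()).incrementAndGet();
        }
        System.out.println(counter); // counts such as {a=3, b=2, c=1}; iteration order may vary
    }
}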
Use of java.util.concurrent.atomic.AtomicLong in project druid by druid-io.
The class JettyQosTest, method testQoS.
@Test(timeout = 60_000L)
public void testQoS() throws Exception {
  final int fastThreads = 20;
  final int slowThreads = 15;
  final int slowRequestsPerThread = 5;
  final int fastRequestsPerThread = 200;
  final HttpClient fastClient = new ClientHolder(fastThreads).getClient();
  final HttpClient slowClient = new ClientHolder(slowThreads).getClient();
  final ExecutorService fastPool = Execs.multiThreaded(fastThreads, "fast-%d");
  final ExecutorService slowPool = Execs.multiThreaded(slowThreads, "slow-%d");
  final CountDownLatch latch = new CountDownLatch(fastThreads * fastRequestsPerThread);
  final AtomicLong fastCount = new AtomicLong();
  final AtomicLong slowCount = new AtomicLong();
  final AtomicLong fastElapsed = new AtomicLong();
  final AtomicLong slowElapsed = new AtomicLong();
  for (int i = 0; i < slowThreads; i++) {
    slowPool.submit(
        new Runnable() {
          @Override
          public void run() {
            for (int i = 0; i < slowRequestsPerThread; i++) {
              long startTime = System.currentTimeMillis();
              try {
                ListenableFuture<StatusResponseHolder> go = slowClient.go(
                    new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/slow/hello")),
                    new StatusResponseHandler(Charset.defaultCharset())
                );
                go.get();
                slowCount.incrementAndGet();
                slowElapsed.addAndGet(System.currentTimeMillis() - startTime);
              } catch (InterruptedException e) {
                // BE COOL
              } catch (Exception e) {
                e.printStackTrace();
                throw Throwables.propagate(e);
              }
            }
          }
        }
    );
  }
  // wait for jetty server pool to completely fill up
  while (server.getThreadPool().getIdleThreads() != 0) {
    Thread.sleep(25);
  }
  for (int i = 0; i < fastThreads; i++) {
    fastPool.submit(
        new Runnable() {
          @Override
          public void run() {
            for (int i = 0; i < fastRequestsPerThread; i++) {
              long startTime = System.currentTimeMillis();
              try {
                ListenableFuture<StatusResponseHolder> go = fastClient.go(
                    new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/default")),
                    new StatusResponseHandler(Charset.defaultCharset())
                );
                go.get();
                fastCount.incrementAndGet();
                fastElapsed.addAndGet(System.currentTimeMillis() - startTime);
                latch.countDown();
              } catch (InterruptedException e) {
                // BE COOL
              } catch (Exception e) {
                e.printStackTrace();
                throw Throwables.propagate(e);
              }
            }
          }
        }
    );
  }
  // Wait for all fast requests to be served
  latch.await();
  slowPool.shutdownNow();
  fastPool.shutdown();
  // check that fast requests finished quickly
  Assert.assertTrue(fastElapsed.get() / fastCount.get() < 500);
}
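
The paired counters (fastCount with fastElapsed, slowCount with slowElapsed) exist so the final assertion can compute an average latency: each request atomically bumps a request count and a total-elapsed accumulator. A compact sketch of that idiom, with hypothetical class and method names:

import java.util.concurrent.atomic.AtomicLong;

public class LatencyStatsDemo {
    private final AtomicLong count = new AtomicLong();
    private final AtomicLong elapsedMillis = new AtomicLong();

    // called from any number of request threads
    public void record(long millis) {
        count.incrementAndGet();
        elapsedMillis.addAndGet(millis);
    }

    // note: the two reads are not atomic together; that is fine for a
    // test assertion after all threads have finished, but an exact
    // live snapshot would need a lock or a single combined state
    public long averageMillis() {
        long n = count.get();
        return n == 0 ? 0 : elapsedMillis.get() / n;
    }

    public static void main(String[] args) {
        LatencyStatsDemo stats = new LatencyStatsDemo();
        stats.record(120);
        stats.record(80);
        System.out.println(stats.averageMillis()); // 100
    }
}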
Use of java.util.concurrent.atomic.AtomicLong in project druid by druid-io.
The class JettyTest, method testTimeouts.
// this test will deadlock if it hits an issue, so it is ignored by default
@Test
@Ignore
public void testTimeouts() throws Exception {
  // test that request timeouts do not lock up all threads
  final Executor executor = Executors.newFixedThreadPool(100);
  final AtomicLong count = new AtomicLong(0);
  final CountDownLatch latch = new CountDownLatch(1000);
  for (int i = 0; i < 10000; i++) {
    executor.execute(
        new Runnable() {
          @Override
          public void run() {
            executor.execute(
                new Runnable() {
                  @Override
                  public void run() {
                    long startTime = System.currentTimeMillis();
                    long startTime2 = 0;
                    try {
                      ListenableFuture<StatusResponseHolder> go = client.go(
                          new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/slow/hello")),
                          new StatusResponseHandler(Charset.defaultCharset())
                      );
                      startTime2 = System.currentTimeMillis();
                      go.get();
                    } catch (Exception e) {
                      e.printStackTrace();
                    } finally {
                      System.out.println(
                          "Response time client: " + (System.currentTimeMillis() - startTime)
                          + ", time taken for getting future: " + (System.currentTimeMillis() - startTime2)
                          + ", counter: " + count.incrementAndGet()
                      );
                      latch.countDown();
                    }
                  }
                }
            );
          }
        }
    );
  }
  latch.await();
}
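
count.incrementAndGet() doubles as a progress counter here: it returns the post-increment value, so each completing request prints a unique, monotonically increasing sequence number even though thousands of requests finish concurrently. A self-contained sketch of that idiom, with illustrative names only:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

public class ProgressCounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 1000;
        final AtomicLong completed = new AtomicLong(0);
        final CountDownLatch latch = new CountDownLatch(tasks);
        ExecutorService pool = Executors.newFixedThreadPool(8);
        for (int i = 0; i < tasks; i++) {
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    // incrementAndGet returns the updated value, so every task
                    // observes a distinct sequence number with no locking
                    long n = completed.incrementAndGet();
                    if (n % 250 == 0) {
                        System.out.println("completed " + n + " tasks");
                    }
                    latch.countDown();
                }
            });
        }
        latch.await();
        pool.shutdown();
        System.out.println("total: " + completed.get()); // always 1000
    }
}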