use of org.apache.druid.java.util.common.guava.SequenceWrapper in project druid by druid-io.
the class ForegroundCachePopulator method wrap.
@Override
public <T, CacheType> Sequence<T> wrap(final Sequence<T> sequence, final Function<T, CacheType> cacheFn, final Cache cache, final Cache.NamedKey cacheKey) {
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final MutableBoolean tooBig = new MutableBoolean(false);
  final JsonGenerator jsonGenerator;
  try {
    jsonGenerator = objectMapper.getFactory().createGenerator(bytes);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return Sequences.wrap(Sequences.map(sequence, input -> {
    if (!tooBig.isTrue()) {
      try {
        jsonGenerator.writeObject(cacheFn.apply(input));
        // Not flushing jsonGenerator before this check; its internal buffer is
        // typically just a few KB, and we don't want to waste cycles flushing.
        if (maxEntrySize > 0 && bytes.size() > maxEntrySize) {
          tooBig.setValue(true);
        }
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
    return input;
  }), new SequenceWrapper() {
    @Override
    public void after(final boolean isDone, final Throwable thrown) throws Exception {
      jsonGenerator.close();
      if (isDone) {
        // Check tooBig, then check maxEntrySize one more time, after closing/flushing jsonGenerator.
        if (tooBig.isTrue() || (maxEntrySize > 0 && bytes.size() > maxEntrySize)) {
          cachePopulatorStats.incrementOversized();
          return;
        }
        try {
          cache.put(cacheKey, bytes.toByteArray());
          cachePopulatorStats.incrementOk();
        } catch (Exception e) {
          log.warn(e, "Unable to write to cache");
          cachePopulatorStats.incrementError();
        }
      }
    }
  });
}
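For readers new to this API: SequenceWrapper (an abstract class in org.apache.druid.java.util.common.guava) exposes three hooks — before() runs once before the first element is pulled, wrap(Supplier) surrounds each chunk of sequence processing, and after(isDone, thrown) runs once when processing completes or fails. Below is a minimal, self-contained sketch — not from the Druid codebase, with a made-up class name — showing the lifecycle against a simple in-memory sequence.

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.SequenceWrapper;
import org.apache.druid.java.util.common.guava.Sequences;

import java.util.Arrays;

public class SequenceWrapperLifecycleSketch {
  public static void main(String[] args) {
    Sequence<Integer> wrapped = Sequences.wrap(
        Sequences.simple(Arrays.asList(1, 2, 3)),
        new SequenceWrapper() {
          @Override
          public void before() {
            // Runs once, before the first element is pulled.
            System.out.println("before");
          }

          @Override
          public void after(boolean isDone, Throwable thrown) {
            // Runs once, after processing finishes (isDone == true) or fails.
            System.out.println("after: isDone=" + isDone + ", thrown=" + thrown);
          }
        }
    );
    // accumulate() drives the sequence, which triggers before() and after().
    Integer sum = wrapped.accumulate(0, (acc, in) -> acc + in);
    System.out.println("sum=" + sum);
  }
}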
use of org.apache.druid.java.util.common.guava.SequenceWrapper in project druid by druid-io.
the class MetricsEmittingQueryRunner method run.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext responseContext) {
  QueryPlus<T> queryWithMetrics = queryPlus.withQueryMetrics(queryToolChest);
  final QueryMetrics<?> queryMetrics = queryWithMetrics.getQueryMetrics();
  applyCustomDimensions.accept(queryMetrics);
  return Sequences.wrap(
      // Use LazySequence so that queryRunner.run() (which prepares the underlying Sequence) executes after
      // `startTime = System.nanoTime();` (see below) and is therefore included in the reported query time.
      new LazySequence<>(() -> queryRunner.run(queryWithMetrics, responseContext)),
      new SequenceWrapper() {
        private long startTimeNs;

        @Override
        public void before() {
          startTimeNs = System.nanoTime();
        }

        @Override
        public void after(boolean isDone, Throwable thrown) {
          if (thrown != null) {
            queryMetrics.status("failed");
          } else if (!isDone) {
            queryMetrics.status("short");
          }
          long timeTakenNs = System.nanoTime() - startTimeNs;
          reportMetric.accept(queryMetrics, timeTakenNs);
          if (creationTimeNs > 0) {
            queryMetrics.reportWaitTime(startTimeNs - creationTimeNs);
          }
          try {
            queryMetrics.emit(emitter);
          } catch (Exception e) {
            // The query should not fail because of an emitter failure. Swallow the exception.
            log.error("Failure while trying to emit [%s] with stacktrace [%s]", emitter.toString(), e);
          }
        }
      }
  );
}
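The LazySequence here is what makes the timing meaningful: if queryRunner.run() were called eagerly, its cost would be paid before before() records startTimeNs. A hedged, self-contained sketch of that interaction follows — the class name and printed strings are made up, but the Druid classes and signatures are as used above.

import org.apache.druid.java.util.common.guava.LazySequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.SequenceWrapper;
import org.apache.druid.java.util.common.guava.Sequences;

import java.util.Arrays;

public class LazyTimingSketch {
  public static void main(String[] args) {
    Sequence<Integer> timed = Sequences.wrap(
        // The supplier stands in for queryRunner.run(); LazySequence defers it
        // until the sequence is first consumed, i.e. after before() has fired,
        // so its cost lands inside the timed window.
        new LazySequence<>(() -> {
          System.out.println("building the underlying sequence (timed)");
          return Sequences.simple(Arrays.asList(1, 2, 3));
        }),
        new SequenceWrapper() {
          private long startNs;

          @Override
          public void before() {
            startNs = System.nanoTime();
          }

          @Override
          public void after(boolean isDone, Throwable thrown) {
            System.out.println("took " + (System.nanoTime() - startNs) + " ns");
          }
        }
    );
    timed.accumulate(0, (acc, in) -> acc + in);
  }
}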
use of org.apache.druid.java.util.common.guava.SequenceWrapper in project druid by druid-io.
the class SpecificSegmentQueryRunner method run.
@Override
public Sequence<T> run(final QueryPlus<T> input, final ResponseContext responseContext) {
  final QueryPlus<T> queryPlus = input.withQuery(
      Queries.withSpecificSegments(input.getQuery(), Collections.singletonList(specificSpec.getDescriptor()))
  );
  final Query<T> query = queryPlus.getQuery();
  final Thread currThread = Thread.currentThread();
  final String currThreadName = currThread.getName();
  final String newName = query.getType() + "_" + query.getDataSource() + "_" + query.getIntervals();
  final Sequence<T> baseSequence = doNamed(currThread, currThreadName, newName, () -> base.run(queryPlus, responseContext));
  Sequence<T> segmentMissingCatchingSequence = new Sequence<T>() {
    @Override
    public <OutType> OutType accumulate(final OutType initValue, final Accumulator<OutType, T> accumulator) {
      try {
        return baseSequence.accumulate(initValue, accumulator);
      } catch (SegmentMissingException e) {
        appendMissingSegment(responseContext);
        return initValue;
      }
    }

    @Override
    public <OutType> Yielder<OutType> toYielder(final OutType initValue, final YieldingAccumulator<OutType, T> accumulator) {
      try {
        return makeYielder(baseSequence.toYielder(initValue, accumulator));
      } catch (SegmentMissingException e) {
        appendMissingSegment(responseContext);
        return Yielders.done(initValue, null);
      }
    }

    private <OutType> Yielder<OutType> makeYielder(final Yielder<OutType> yielder) {
      return new Yielder<OutType>() {
        @Override
        public OutType get() {
          return yielder.get();
        }

        @Override
        public Yielder<OutType> next(final OutType initValue) {
          try {
            return yielder.next(initValue);
          } catch (SegmentMissingException e) {
            appendMissingSegment(responseContext);
            return Yielders.done(initValue, null);
          }
        }

        @Override
        public boolean isDone() {
          return yielder.isDone();
        }

        @Override
        public void close() throws IOException {
          yielder.close();
        }
      };
    }
  };
  return Sequences.wrap(segmentMissingCatchingSequence, new SequenceWrapper() {
    @Override
    public <RetType> RetType wrap(Supplier<RetType> sequenceProcessing) {
      return doNamed(currThread, currThreadName, newName, sequenceProcessing);
    }
  });
}
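Unlike the previous examples, this one overrides wrap(Supplier) rather than before()/after(): the hook surrounds every chunk of sequence processing, so the thread rename done by doNamed() is in effect whenever elements are actually produced and is undone in between. A hedged, self-contained sketch of that pattern follows; the class name and "query-worker" are made up, and the Supplier import must match whichever type SequenceWrapper.wrap declares in your Druid version (Guava's, in the versions these snippets appear to come from).

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.SequenceWrapper;
import org.apache.druid.java.util.common.guava.Sequences;

import com.google.common.base.Supplier;

import java.util.Arrays;

public class ThreadNamingSketch {
  public static void main(String[] args) {
    Sequence<Integer> renamed = Sequences.wrap(
        Sequences.simple(Arrays.asList(1, 2, 3)),
        new SequenceWrapper() {
          @Override
          public <RetType> RetType wrap(Supplier<RetType> sequenceProcessing) {
            final Thread thread = Thread.currentThread();
            final String originalName = thread.getName();
            thread.setName("query-worker"); // hypothetical name
            try {
              // All accumulation of the wrapped sequence happens inside get().
              return sequenceProcessing.get();
            } finally {
              thread.setName(originalName); // restore even if processing throws
            }
          }
        }
    );
    renamed.accumulate(0, (acc, in) -> acc + in);
  }
}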
use of org.apache.druid.java.util.common.guava.SequenceWrapper in project druid by druid-io.
the class CPUTimeMetricQueryRunner method run.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext responseContext) {
  final long startRun = JvmUtils.getCurrentThreadCpuTime();
  final QueryPlus<T> queryWithMetrics = queryPlus.withQueryMetrics(queryToolChest);
  final Sequence<T> baseSequence = delegate.run(queryWithMetrics, responseContext);
  cpuTimeAccumulator.addAndGet(JvmUtils.getCurrentThreadCpuTime() - startRun);
  return Sequences.wrap(baseSequence, new SequenceWrapper() {
    @Override
    public <RetType> RetType wrap(Supplier<RetType> sequenceProcessing) {
      final long start = JvmUtils.getCurrentThreadCpuTime();
      try {
        return sequenceProcessing.get();
      } finally {
        cpuTimeAccumulator.addAndGet(JvmUtils.getCurrentThreadCpuTime() - start);
      }
    }

    @Override
    public void after(boolean isDone, Throwable thrown) {
      if (report) {
        final long cpuTimeNs = cpuTimeAccumulator.get();
        if (cpuTimeNs > 0) {
          responseContext.addCpuNanos(cpuTimeNs);
          queryWithMetrics.getQueryMetrics().reportCpuTime(cpuTimeNs).emit(emitter);
        }
      }
    }
  });
}
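Two details are worth noting here. First, the runner measures per-thread CPU time rather than wall time. Second, it measures inside wrap() rather than before()/after() because sequence processing can resume on different threads, so each chunk has to be timed on the thread that actually runs it. To the best of my knowledge, JvmUtils.getCurrentThreadCpuTime() is a thin wrapper over the standard ThreadMXBean call sketched below (the helper name is made up).

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

// Hedged sketch: nanoseconds of CPU actually consumed by the calling thread
// (not wall time), or -1 if this JVM cannot measure it.
static long currentThreadCpuTimeNs() {
  ThreadMXBean bean = ManagementFactory.getThreadMXBean();
  return bean.isCurrentThreadCpuTimeSupported() ? bean.getCurrentThreadCpuTime() : -1L;
}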
use of org.apache.druid.java.util.common.guava.SequenceWrapper in project druid by druid-io.
the class BackgroundCachePopulatorTest method before.
@Before
public void before() {
  this.backgroundCachePopulator = new BackgroundCachePopulator(
      Execs.multiThreaded(2, "CachingQueryRunnerTest-%d"),
      JSON_MAPPER,
      new CachePopulatorStats(),
      -1
  );
  TopNQueryBuilder builder = new TopNQueryBuilder()
      .dataSource("ds")
      .dimension("top_dim")
      .metric("imps")
      .threshold(3)
      .intervals("2011-01-05/2011-01-10")
      .aggregators(AGGS)
      .granularity(Granularities.ALL);
  this.query = builder.build();
  this.toolchest = new TopNQueryQueryToolChest(new TopNQueryConfig());
  List<Result> expectedRes = makeTopNResults(false, OBJECTS);
  this.closable = new AssertingClosable();
  final Sequence resultSeq = Sequences.wrap(Sequences.simple(expectedRes), new SequenceWrapper() {
    @Override
    public void before() {
      Assert.assertFalse(closable.isClosed());
    }

    @Override
    public void after(boolean isDone, Throwable thrown) {
      closable.close();
    }
  });
  this.baseRunner = (queryPlus, responseContext) -> resultSeq;
  this.cache = new Cache() {
    private final ConcurrentMap<NamedKey, byte[]> baseMap = new ConcurrentHashMap<>();

    @Override
    public byte[] get(NamedKey key) {
      return baseMap.get(key);
    }

    @Override
    public void put(NamedKey key, byte[] value) {
      baseMap.put(key, value);
    }

    @Override
    public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
      return null;
    }

    @Override
    public void close(String namespace) {
    }

    @Override
    public void close() {
    }

    @Override
    public CacheStats getStats() {
      return null;
    }

    @Override
    public boolean isLocal() {
      return true;
    }

    @Override
    public void doMonitor(ServiceEmitter emitter) {
    }
  };
}
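The wrapper in this test exists purely for verification: before() asserts the resource is still open when iteration starts, and after() releases it. A condensed, hedged version of the same idea follows — the test name is made up, and an AtomicBoolean stands in for the AssertingClosable used above.

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.SequenceWrapper;
import org.apache.druid.java.util.common.guava.Sequences;

import org.junit.Assert;
import org.junit.Test;

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;

@Test
public void sequenceWrapperReleasesResourceOnce() {
  final AtomicBoolean closed = new AtomicBoolean(false);
  Sequence<Integer> seq = Sequences.wrap(
      Sequences.simple(Arrays.asList(1, 2, 3)),
      new SequenceWrapper() {
        @Override
        public void before() {
          Assert.assertFalse("must still be open when iteration starts", closed.get());
        }

        @Override
        public void after(boolean isDone, Throwable thrown) {
          Assert.assertTrue("fully consumed without error", isDone);
          Assert.assertTrue("closed exactly once", closed.compareAndSet(false, true));
        }
      }
  );
  int sum = seq.accumulate(0, (acc, in) -> acc + in);
  Assert.assertEquals(6, sum);
  Assert.assertTrue(closed.get());
}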