Use of org.apache.druid.segment.ReferenceCountingSegment in project druid by druid-io.
The class ServerManagerTest, method testReferenceCounting.
@Test
public void testReferenceCounting() throws Exception
{
  loadQueryable("test", "3", Intervals.of("2011-04-04/2011-04-05"));

  Future future = assertQueryable(
      Granularities.DAY,
      "test",
      Intervals.of("2011-04-04/2011-04-06"),
      ImmutableList.of(new Pair<String, Interval>("3", Intervals.of("2011-04-04/2011-04-05")))
  );

  queryNotifyLatch.await(1000, TimeUnit.MILLISECONDS);

  Assert.assertEquals(1, factory.getSegmentReferences().size());
  for (ReferenceCountingSegment referenceCountingSegment : factory.getSegmentReferences()) {
    Assert.assertEquals(1, referenceCountingSegment.getNumReferences());
  }

  queryWaitYieldLatch.countDown();

  Assert.assertEquals(1, factory.getAdapters().size());
  for (SegmentForTesting segmentForTesting : factory.getAdapters()) {
    Assert.assertFalse(segmentForTesting.isClosed());
  }

  queryWaitLatch.countDown();
  future.get();

  dropQueryable("test", "3", Intervals.of("2011-04-04/2011-04-05"));
  for (SegmentForTesting segmentForTesting : factory.getAdapters()) {
    Assert.assertTrue(segmentForTesting.isClosed());
  }
}
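The lifecycle this test verifies can be summarized in isolation. Below is a minimal sketch, not taken from the Druid source: getNumReferences() and isClosed() appear in the test above, while wrapRootGenerationSegment(), increment(), and decrement() are assumed from ReferenceCountingSegment's API, and baseSegment stands in for any org.apache.druid.segment.Segment.

// Minimal sketch of the reference-counting lifecycle (assumed API; baseSegment
// is a hypothetical Segment instance).
ReferenceCountingSegment ref = ReferenceCountingSegment.wrapRootGenerationSegment(baseSegment);

if (ref.increment()) {       // a query takes a reference before reading
  try {
    // ... read via ref.asQueryableIndex() or ref.asStorageAdapter() ...
  } finally {
    ref.decrement();         // release the reference when the query finishes
  }
}

ref.close();                 // with no references held, the base segment closes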
Use of org.apache.druid.segment.ReferenceCountingSegment in project druid by druid-io.
The class ServerManagerTest, method testReferenceCountingWhileQueryExecuting.
@Test
public void testReferenceCountingWhileQueryExecuting() throws Exception
{
  loadQueryable("test", "3", Intervals.of("2011-04-04/2011-04-05"));

  Future future = assertQueryable(
      Granularities.DAY,
      "test",
      Intervals.of("2011-04-04/2011-04-06"),
      ImmutableList.of(new Pair<String, Interval>("3", Intervals.of("2011-04-04/2011-04-05")))
  );

  queryNotifyLatch.await(1000, TimeUnit.MILLISECONDS);

  Assert.assertEquals(1, factory.getSegmentReferences().size());
  for (ReferenceCountingSegment referenceCountingSegment : factory.getSegmentReferences()) {
    Assert.assertEquals(1, referenceCountingSegment.getNumReferences());
  }

  queryWaitYieldLatch.countDown();

  Assert.assertEquals(1, factory.getAdapters().size());
  for (SegmentForTesting segmentForTesting : factory.getAdapters()) {
    Assert.assertFalse(segmentForTesting.isClosed());
  }

  // Drop the segment while the query is still executing; it must not close yet.
  dropQueryable("test", "3", Intervals.of("2011-04-04/2011-04-05"));
  for (SegmentForTesting segmentForTesting : factory.getAdapters()) {
    Assert.assertFalse(segmentForTesting.isClosed());
  }

  queryWaitLatch.countDown();
  future.get();

  // Once the query completes and releases its reference, the segment closes.
  for (SegmentForTesting segmentForTesting : factory.getAdapters()) {
    Assert.assertTrue(segmentForTesting.isClosed());
  }
}
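This second test pins down the ordering guarantee: dropping a segment while a query holds a reference must not close the underlying segment until the last reference is released. A sketch of that interaction, under the same assumed API as the previous sketch:

// Sketch (assumed API, as above): close() while a reference is held defers
// closing the underlying segment until the reference count drains to zero.
ReferenceCountingSegment ref = ReferenceCountingSegment.wrapRootGenerationSegment(baseSegment);

ref.increment();   // a query is executing against the segment
ref.close();       // the segment is dropped mid-query
// baseSegment is NOT closed yet; the in-flight query keeps reading safely
ref.decrement();   // the query finishes and releases its reference
// only now is the underlying baseSegment actually closed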
Use of org.apache.druid.segment.ReferenceCountingSegment in project druid by druid-io.
The class ServerManager, method getQueryRunnerForSegments.
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs)
{
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  if (factory == null) {
    final QueryUnsupportedException e = new QueryUnsupportedException(
        StringUtils.format("Unknown query type, [%s]", query.getClass())
    );
    log.makeAlert(e, "Error while executing a query[%s]", query.getId())
       .addData("dataSource", query.getDataSource())
       .emit();
    throw e;
  }
  final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);

  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
  final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline =
      segmentManager.getTimeline(analysis);

  // Make sure this query type can handle the subquery, if present.
  if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
    throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
  }

  if (maybeTimeline.isPresent()) {
    timeline = maybeTimeline.get();
  } else {
    return new ReportTimelineMissingSegmentQueryRunner<>(Lists.newArrayList(specs));
  }

  // segmentMapFn maps each base Segment into a joined Segment if necessary.
  final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
      analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
      analysis.getPreJoinableClauses(),
      cpuTimeAccumulator,
      analysis.getBaseQuery().orElse(query)
  );

  // Compute the join cache key once here so it doesn't need to be recomputed for every segment.
  final Optional<byte[]> cacheKeyPrefix = analysis.isJoin()
      ? joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis)
      : Optional.of(StringUtils.EMPTY_BYTES);

  final FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable
      .create(specs)
      .transformCat(descriptor -> Collections.singletonList(
          buildQueryRunnerForSegment(query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix)
      ));

  return CPUTimeMetricQueryRunner.safeBuild(
      new FinalizeResultsQueryRunner<>(toolChest.mergeResults(factory.mergeRunners(queryProcessingPool, queryRunners)), toolChest),
      toolChest,
      emitter,
      cpuTimeAccumulator,
      true
  );
}
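For context, a hypothetical caller of this entry point might look like the following. The SegmentDescriptor constructor, QueryPlus.wrap(), and ResponseContext.createEmpty() are real Druid APIs; serverManager, query, the generic context for T, and the interval/version/partition values are illustrative.

// Hypothetical caller: run a query against two explicit partition chunks of
// one segment version. Assumes a surrounding generic context for T.
final List<SegmentDescriptor> specs = ImmutableList.of(
    new SegmentDescriptor(Intervals.of("2011-04-04/2011-04-05"), "v1", 0),
    new SegmentDescriptor(Intervals.of("2011-04-04/2011-04-05"), "v1", 1)
);
final QueryRunner<T> runner = serverManager.getQueryRunnerForSegments(query, specs);
final Sequence<T> results = runner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());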
Use of org.apache.druid.segment.ReferenceCountingSegment in project druid by druid-io.
The class ServerManager, method getQueryRunnerForIntervals.
@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals)
{
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());

  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
  final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline =
      segmentManager.getTimeline(analysis);

  if (maybeTimeline.isPresent()) {
    timeline = maybeTimeline.get();
  } else {
    // No timeline for this datasource, so there is nothing to query.
    return new NoopQueryRunner<>();
  }

  FunctionalIterable<SegmentDescriptor> segmentDescriptors = FunctionalIterable
      .create(intervals)
      .transformCat(timeline::lookup)
      .transformCat(holder -> {
        if (holder == null) {
          return null;
        }
        return FunctionalIterable
            .create(holder.getObject())
            .transform(partitionChunk -> new SegmentDescriptor(
                holder.getInterval(),
                holder.getVersion(),
                partitionChunk.getChunkNumber()
            ));
      });

  return getQueryRunnerForSegments(query, segmentDescriptors);
}
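A caller never needs to resolve partition chunks itself; passing intervals is enough. A hypothetical usage sketch (serverManager, query, and the generic context for T are illustrative):

// Hypothetical caller: the interval-based entry point resolves each interval
// to (interval, version, partitionNumber) descriptors via the timeline and
// then delegates to getQueryRunnerForSegments.
final QueryRunner<T> runner = serverManager.getQueryRunnerForIntervals(
    query,
    ImmutableList.of(Intervals.of("2011-04-04/2011-04-06"))
);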
Use of org.apache.druid.segment.ReferenceCountingSegment in project druid by druid-io.
The class SegmentManager, method loadSegment.
/**
 * Load a single segment.
 *
 * @param segment    segment to load
 * @param lazy       whether to lazily load column metadata
 * @param loadFailed callback to execute if the segment's lazy load fails
 *
 * @return true if the segment was newly loaded, false if it was already loaded
 *
 * @throws SegmentLoadingException if the segment cannot be loaded
 */
public boolean loadSegment(final DataSegment segment, boolean lazy, SegmentLazyLoadFailCallback loadFailed)
    throws SegmentLoadingException
{
  final ReferenceCountingSegment adapter = getSegmentReference(segment, lazy, loadFailed);
  final SettableSupplier<Boolean> resultSupplier = new SettableSupplier<>();

  // compute() is used to ensure that the operation for a datasource is executed atomically.
  dataSources.compute(segment.getDataSource(), (k, v) -> {
    final DataSourceState dataSourceState = v == null ? new DataSourceState() : v;
    final VersionedIntervalTimeline<String, ReferenceCountingSegment> loadedIntervals =
        dataSourceState.getTimeline();
    final PartitionChunk<ReferenceCountingSegment> entry = loadedIntervals.findChunk(
        segment.getInterval(),
        segment.getVersion(),
        segment.getShardSpec().getPartitionNum()
    );

    if (entry != null) {
      log.warn("Told to load an adapter for segment[%s] that already exists", segment.getId());
      resultSupplier.set(false);
    } else {
      IndexedTable table = adapter.as(IndexedTable.class);
      if (table != null) {
        if (dataSourceState.isEmpty() || dataSourceState.numSegments == dataSourceState.tablesLookup.size()) {
          dataSourceState.tablesLookup.put(segment.getId(), new ReferenceCountingIndexedTable(table));
        } else {
          log.error("Cannot load segment[%s] with IndexedTable, no existing segments are joinable", segment.getId());
        }
      } else if (dataSourceState.tablesLookup.size() > 0) {
        log.error("Cannot load segment[%s] without IndexedTable, all existing segments are joinable", segment.getId());
      }
      loadedIntervals.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(adapter));
      dataSourceState.addSegment(segment);
      resultSupplier.set(true);
    }
    return dataSourceState;
  });

  return resultSupplier.get();
}
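A hypothetical call site, assuming SegmentLazyLoadFailCallback exposes a no-op NOOP constant; segmentManager, dataSegment, and log are illustrative:

// Hypothetical caller: load a segment eagerly. SegmentLazyLoadFailCallback.NOOP
// is an assumed no-op callback constant; dataSegment is illustrative.
final boolean newlyLoaded = segmentManager.loadSegment(
    dataSegment,
    false,                             // lazy = false: load column metadata now
    SegmentLazyLoadFailCallback.NOOP   // nothing extra to do on lazy-load failure
);
if (!newlyLoaded) {
  log.warn("Segment[%s] was already loaded", dataSegment.getId());
}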