Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
The class GroupByMergedQueryRunner, method run:
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext responseContext)
{
  final GroupByQuery query = (GroupByQuery) queryPlus.getQuery();
  final GroupByQueryConfig querySpecificConfig = configSupplier.get().withOverrides(query);
  final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
  // Shared accumulation targets: an IncrementalIndex for merged results, a queue for by-segment results.
  final Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> indexAccumulatorPair =
      GroupByQueryHelper.createIndexAccumulatorPair(query, null, querySpecificConfig);
  final Pair<Queue, Accumulator<Queue, T>> bySegmentAccumulatorPair =
      GroupByQueryHelper.createBySegmentAccumulatorPair();
  final boolean bySegment = QueryContexts.isBySegment(query);
  final int priority = QueryContexts.getPriority(query);
  final QueryPlus<T> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
  // Fan out: submit one prioritized task per sub-runner to the query processing pool.
  final List<ListenableFuture<Void>> futures = Lists.newArrayList(
      Iterables.transform(
          queryables,
          new Function<QueryRunner<T>, ListenableFuture<Void>>()
          {
            @Override
            public ListenableFuture<Void> apply(final QueryRunner<T> input)
            {
              if (input == null) {
                throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
              }
              ListenableFuture<Void> future = queryProcessingPool.submitRunnerTask(
                  new AbstractPrioritizedQueryRunnerCallable<Void, T>(priority, input)
                  {
                    @Override
                    public Void call()
                    {
                      try {
                        if (bySegment) {
                          input.run(threadSafeQueryPlus, responseContext)
                               .accumulate(bySegmentAccumulatorPair.lhs, bySegmentAccumulatorPair.rhs);
                        } else {
                          input.run(threadSafeQueryPlus, responseContext)
                               .accumulate(indexAccumulatorPair.lhs, indexAccumulatorPair.rhs);
                        }
                        return null;
                      }
                      catch (QueryInterruptedException e) {
                        throw new RuntimeException(e);
                      }
                      catch (Exception e) {
                        log.error(e, "Exception with one of the sequences!");
                        Throwables.propagateIfPossible(e);
                        throw new RuntimeException(e);
                      }
                    }
                  }
              );
              if (isSingleThreaded) {
                waitForFutureCompletion(query, ImmutableList.of(future), indexAccumulatorPair.lhs);
              }
              return future;
            }
          }
      )
  );
  // Fan in: in the multi-threaded case, block until every sub-runner has accumulated its results.
  if (!isSingleThreaded) {
    waitForFutureCompletion(query, futures, indexAccumulatorPair.lhs);
  }
  if (bySegment) {
    return Sequences.simple(bySegmentAccumulatorPair.lhs);
  }
  // Wrap the merged index in a Sequence, closing the index as "baggage" once the sequence is consumed.
  return Sequences.withBaggage(
      Sequences.simple(
          Iterables.transform(
              indexAccumulatorPair.lhs.iterableWithPostAggregations(null, query.isDescending()),
              new Function<Row, T>()
              {
                @Override
                public T apply(Row input)
                {
                  return (T) input;
                }
              }
          )
      ),
      indexAccumulatorPair.lhs
  );
}
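The method above is a fan-out/fan-in: each sub-runner is submitted to the processing pool as a prioritized task, every task folds its rows into a shared accumulator, and waitForFutureCompletion blocks until all tasks finish. A minimal, self-contained sketch of the same pattern using plain Guava executors (all names here are illustrative, not Druid API):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

public class FanOutAccumulateSketch
{
  public static void main(String[] args) throws Exception
  {
    final ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
    // Shared, thread-safe accumulation target; GroupByMergedQueryRunner uses an
    // IncrementalIndex behind an Accumulator instead of a plain queue.
    final Queue<String> results = new ConcurrentLinkedQueue<>();
    // Stand-ins for the per-segment QueryRunners.
    final List<Supplier<String>> perSegmentWork = List.of(
        () -> "rows-from-segment-1",
        () -> "rows-from-segment-2"
    );
    final List<ListenableFuture<Void>> futures = new ArrayList<>();
    for (Supplier<String> work : perSegmentWork) {
      futures.add(exec.submit(() -> {
        results.add(work.get()); // each task accumulates into the shared target
        return null;
      }));
    }
    // Fan in: block until every per-segment task has finished, like waitForFutureCompletion.
    Futures.allAsList(futures).get();
    System.out.println(results);
    exec.shutdown();
  }
}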
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
The class EmptyIndexTest, method testEmptyIndex:
@Test
public void testEmptyIndex() throws Exception
{
  // Turn a unique temp file into a temp directory (pre-NIO idiom).
  File tmpDir = File.createTempFile("emptyIndex", "");
  if (!tmpDir.delete()) {
    throw new IllegalStateException("tmp delete failed");
  }
  if (!tmpDir.mkdir()) {
    throw new IllegalStateException("tmp mkdir failed");
  }
  try {
    IncrementalIndex emptyIndex = new OnheapIncrementalIndex.Builder()
        .setSimpleTestingIndexSchema()
        .setMaxRowCount(1000)
        .build();
    IncrementalIndexAdapter emptyIndexAdapter =
        new IncrementalIndexAdapter(Intervals.of("2012-08-01/P3D"), emptyIndex, new ConciseBitmapFactory());
    TestHelper.getTestIndexMergerV9(segmentWriteOutMediumFactory)
              .merge(Collections.singletonList(emptyIndexAdapter), true, new AggregatorFactory[0], tmpDir, new IndexSpec(), -1);
    QueryableIndex emptyQueryableIndex = TestHelper.getTestIndexIO().loadIndex(tmpDir);
    // A merged empty index should expose no dimensions, no metrics, and zero timestamp rows.
    Assert.assertEquals("getDimensionNames", 0, Iterables.size(emptyQueryableIndex.getAvailableDimensions()));
    Assert.assertEquals("getMetricNames", 0, emptyQueryableIndex.getColumnNames().size());
    Assert.assertEquals("getDataInterval", Intervals.of("2012-08-01/P3D"), emptyQueryableIndex.getDataInterval());
    Assert.assertEquals("getReadOnlyTimestamps", 0, emptyQueryableIndex.getColumnHolder(ColumnHolder.TIME_COLUMN_NAME).getLength());
  }
  finally {
    FileUtils.deleteDirectory(tmpDir);
  }
}
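The createTempFile/delete/mkdir sequence at the top is the pre-NIO idiom for creating a temp directory. On JDK 7+ the same setup can be done in one call; a sketch, not what the Druid test itself does:

import java.io.File;
import java.nio.file.Files;

// Creates a fresh, uniquely named temp directory in one step,
// replacing the createTempFile/delete/mkdir sequence above.
File tmpDir = Files.createTempDirectory("emptyIndex").toFile();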
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
The class IndexBuilder, method buildMMappedIndex:
public QueryableIndex buildMMappedIndex()
{
  Preconditions.checkNotNull(indexMerger, "indexMerger");
  Preconditions.checkNotNull(tmpDir, "tmpDir");
  try (final IncrementalIndex incrementalIndex = buildIncrementalIndex()) {
    // Persist the in-memory index to a uniquely named subdirectory, then memory-map it back.
    final File persistDir =
        new File(tmpDir, StringUtils.format("testIndex-%s", ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE)));
    return indexIO.loadIndex(indexMerger.persist(incrementalIndex, persistDir, indexSpec, null));
  }
  catch (IOException e) {
    throw new RuntimeException(e);
  }
}
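IndexBuilder is a fluent test helper, so a call site typically chains configuration before the build call. A hypothetical usage sketch; create(), tmpDir(), and rows() are assumed from Druid's test code and may differ between versions:

// Hypothetical usage; the builder methods shown are assumptions based on
// Druid's test utilities, not verified API for every version.
QueryableIndex index = IndexBuilder
    .create()
    .tmpDir(temporaryFolder.newFolder())
    .rows(inputRows) // Iterable<InputRow> prepared by the test
    .buildMMappedIndex();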
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
The class IndexBuilder, method buildIncrementalIndexWithRows:
private static IncrementalIndex buildIncrementalIndexWithRows(
    IncrementalIndexSchema schema,
    int maxRows,
    Iterable<InputRow> rows
)
{
  Preconditions.checkNotNull(schema, "schema");
  final IncrementalIndex incrementalIndex = new OnheapIncrementalIndex.Builder()
      .setIndexSchema(schema)
      .setMaxRowCount(maxRows)
      .build();
  for (InputRow row : rows) {
    try {
      incrementalIndex.add(row);
    }
    catch (IndexSizeExceededException e) {
      throw new RuntimeException(e);
    }
  }
  return incrementalIndex;
}
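Inside the builder, this helper is invoked with a schema, a row limit, and concrete rows. A sketch of such a call; MapBasedInputRow appears elsewhere on this page, while the IncrementalIndexSchema.Builder and DateTimes calls are assumptions:

// Illustrative call to the helper above; schema-builder and DateTimes
// usage are assumptions, not taken from this page.
List<InputRow> rows = ImmutableList.of(
    new MapBasedInputRow(
        DateTimes.of("2012-08-01").getMillis(),
        ImmutableList.of("dim"),
        ImmutableMap.of("dim", "a", "met", 1L)
    )
);
IncrementalIndex index = buildIncrementalIndexWithRows(
    new IncrementalIndexSchema.Builder().build(),
    1000,
    rows
);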
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
The class IndexMergerRollupTest, method testStringFirstLastRollup:
private void testStringFirstLastRollup(AggregatorFactory[] aggregatorFactories) throws Exception
{
  List<Map<String, Object>> eventsList = Arrays.asList(
      new HashMap<String, Object>()
      {
        {
          put("d", "d1");
          put("m", "m1");
        }
      },
      new HashMap<String, Object>()
      {
        {
          put("d", "d1");
          put("m", "m2");
        }
      }
  );
  final File tempDir = temporaryFolder.newFolder();
  List<QueryableIndex> indexes = new ArrayList<>();
  Instant time = Instant.now();
  // Persist each event as its own single-row index; both rows share the same timestamp and dimension value.
  for (Map<String, Object> events : eventsList) {
    IncrementalIndex toPersist = IncrementalIndexTest.createIndex(aggregatorFactories);
    toPersist.add(new MapBasedInputRow(time.toEpochMilli(), ImmutableList.of("d"), events));
    indexes.add(indexIO.loadIndex(indexMerger.persist(toPersist, tempDir, indexSpec, null)));
  }
  // Merging with rollup enabled should collapse the two identical grouping keys into a single row.
  File indexFile = indexMerger.mergeQueryableIndex(indexes, true, aggregatorFactories, tempDir, indexSpec, null, -1);
  try (QueryableIndex mergedIndex = indexIO.loadIndex(indexFile)) {
    Assert.assertEquals("Number of rows should be 1", 1, mergedIndex.getNumRows());
  }
}
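A caller of this helper supplies the first/last string aggregators under test. A hypothetical invocation; the exact constructor arguments of StringFirstAggregatorFactory and StringLastAggregatorFactory vary across Druid versions, so treat them as assumptions:

// Hypothetical invocation; aggregator constructor signatures are assumptions.
testStringFirstLastRollup(new AggregatorFactory[]{
    new StringFirstAggregatorFactory("m", "m", null, 1024),
    new StringLastAggregatorFactory("m", "m", null, 1024)
});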