Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class UseIndexesStrategy, method getExecutionPlan:
@Override
public List<SearchQueryExecutor> getExecutionPlan(SearchQuery query, Segment segment) {
  final ImmutableList.Builder<SearchQueryExecutor> builder = ImmutableList.builder();
  final QueryableIndex index = segment.asQueryableIndex();
  final StorageAdapter adapter = segment.asStorageAdapter();
  final List<DimensionSpec> searchDims = getDimsToSearch(adapter.getAvailableDimensions(), query.getDimensions());
  if (index != null) {
    // pair of bitmap dims and non-bitmap dims
    final Pair<List<DimensionSpec>, List<DimensionSpec>> pair = partitionDimensionList(adapter, searchDims);
    final List<DimensionSpec> bitmapSuppDims = pair.lhs;
    final List<DimensionSpec> nonBitmapSuppDims = pair.rhs;
    if (bitmapSuppDims.size() > 0) {
      final BitmapIndexSelector selector = new ColumnSelectorBitmapIndexSelector(
          index.getBitmapFactoryForDimensions(),
          VirtualColumns.EMPTY,
          index
      );
      // The index-only plan is used only when no filter is specified or the filter supports bitmap
      // indexes. Note: if some filters support bitmap indexes but others do not, the current
      // implementation always employs the cursor-based plan. One possible optimization is generating
      // a bitmap index from the non-bitmap-support filter, and then using it to compute the filtered
      // result by intersecting bitmaps.
      if (filter == null || filter.supportsBitmapIndex(selector)) {
        final ImmutableBitmap timeFilteredBitmap = makeTimeFilteredBitmap(index, segment, filter, interval);
        builder.add(new IndexOnlyExecutor(query, segment, timeFilteredBitmap, bitmapSuppDims));
      } else {
        // Fall back to the cursor-based execution strategy
        nonBitmapSuppDims.addAll(bitmapSuppDims);
      }
    }
    if (nonBitmapSuppDims.size() > 0) {
      builder.add(new CursorBasedExecutor(query, segment, filter, interval, nonBitmapSuppDims));
    }
  } else {
    builder.add(new CursorBasedExecutor(query, segment, filter, interval, searchDims));
  }
  return builder.build();
}
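Note that `filter` and `interval` are fields of the strategy (set from the query when the strategy is constructed), not locals. The partitioning step is the heart of this method: dimensions with bitmap indexes get the index-only executor, everything else falls back to cursors. Below is a minimal sketch of how such a partition could be implemented against StorageAdapter column capabilities; the helper name and the treatment of missing columns are assumptions, and the real partitionDimensionList also accounts for virtual columns.

// Hypothetical partition helper: splits dimensions into those whose columns carry
// bitmap indexes and those that must be scanned with a cursor.
private static Pair<List<DimensionSpec>, List<DimensionSpec>> partitionByBitmapSupport(
    StorageAdapter adapter,
    List<DimensionSpec> dims
) {
  final List<DimensionSpec> bitmapDims = new ArrayList<>();
  final List<DimensionSpec> nonBitmapDims = new ArrayList<>();
  for (DimensionSpec spec : dims) {
    final ColumnCapabilities capabilities = adapter.getColumnCapabilities(spec.getDimension());
    if (capabilities != null && capabilities.hasBitmapIndexes()) {
      bitmapDims.add(spec);
    } else {
      // Missing or non-indexed columns fall back to cursor-based search.
      nonBitmapDims.add(spec);
    }
  }
  return Pair.of(bitmapDims, nonBitmapDims);
}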
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class AggregationTestHelper, method createIndex:
public void createIndex(
    Iterator rows,
    InputRowParser parser,
    final AggregatorFactory[] metrics,
    File outDir,
    long minTimestamp,
    Granularity gran,
    boolean deserializeComplexMetrics,
    int maxRowCount,
    boolean rollup
) throws Exception {
  IncrementalIndex index = null;
  List<File> toMerge = new ArrayList<>();
  try {
    index = new OnheapIncrementalIndex.Builder()
        .setIndexSchema(
            new IncrementalIndexSchema.Builder()
                .withMinTimestamp(minTimestamp)
                .withDimensionsSpec(parser.getParseSpec().getDimensionsSpec())
                .withQueryGranularity(gran)
                .withMetrics(metrics)
                .withRollup(rollup)
                .build()
        )
        .setDeserializeComplexMetrics(deserializeComplexMetrics)
        .setMaxRowCount(maxRowCount)
        .build();
    while (rows.hasNext()) {
      Object row = rows.next();
      if (!index.canAppendRow()) {
        File tmp = tempFolder.newFolder();
        toMerge.add(tmp);
        indexMerger.persist(index, tmp, new IndexSpec(), null);
        index.close();
        index = new OnheapIncrementalIndex.Builder()
            .setIndexSchema(
                new IncrementalIndexSchema.Builder()
                    .withMinTimestamp(minTimestamp)
                    .withDimensionsSpec(parser.getParseSpec().getDimensionsSpec())
                    .withQueryGranularity(gran)
                    .withMetrics(metrics)
                    .withRollup(rollup)
                    .build()
            )
            .setDeserializeComplexMetrics(deserializeComplexMetrics)
            .setMaxRowCount(maxRowCount)
            .build();
      }
      if (row instanceof String && parser instanceof StringInputRowParser) {
        // Note: this is required because StringInputRowParser is InputRowParser<ByteBuffer>
        // as opposed to InputRowParser<String>
        index.add(((StringInputRowParser) parser).parse((String) row));
      } else {
        index.add(((List<InputRow>) parser.parseBatch(row)).get(0));
      }
    }
    if (toMerge.size() > 0) {
      File tmp = tempFolder.newFolder();
      toMerge.add(tmp);
      indexMerger.persist(index, tmp, new IndexSpec(), null);
      List<QueryableIndex> indexes = new ArrayList<>(toMerge.size());
      for (File file : toMerge) {
        indexes.add(indexIO.loadIndex(file));
      }
      indexMerger.mergeQueryableIndex(indexes, rollup, metrics, outDir, new IndexSpec(), null, -1);
      for (QueryableIndex qi : indexes) {
        qi.close();
      }
    } else {
      indexMerger.persist(index, outDir, new IndexSpec(), null);
    }
  } finally {
    if (index != null) {
      index.close();
    }
  }
}
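The helper spills to a temporary folder whenever the in-memory index fills up (canAppendRow returns false), then merges all persisted parts into outDir. A hedged usage sketch follows; the helper instance, parser, and aggregator below are illustrative assumptions, not part of the snippet.

// Hypothetical call: rows are raw JSON strings, so the StringInputRowParser branch
// inside createIndex is taken. maxRowCount controls how often the index spills to disk.
List<String> jsonRows = Arrays.asList(
    "{\"timestamp\":\"2020-01-01T00:00:00Z\",\"dim\":\"a\",\"val\":1}",
    "{\"timestamp\":\"2020-01-01T00:00:00Z\",\"dim\":\"b\",\"val\":2}"
);
helper.createIndex(
    jsonRows.iterator(),
    stringInputRowParser,                                              // assumed StringInputRowParser
    new AggregatorFactory[]{new LongSumAggregatorFactory("val", "val")},
    outDir,                                                            // where the merged segment lands
    0L,                                                                // minTimestamp
    Granularities.NONE,                                                // no time truncation
    true,                                                              // deserializeComplexMetrics
    5000,                                                              // spill after 5000 rows
    true                                                               // rollup
);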
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class GroupByLimitPushDownInsufficientBufferTest, method getRunner2:
private List<QueryRunner<ResultRow>> getRunner2() {
  List<QueryRunner<ResultRow>> runners = new ArrayList<>();
  QueryableIndex index2 = groupByIndices.get(1);
  QueryRunner<ResultRow> tooSmallRunner = makeQueryRunner(
      tooSmallGroupByFactory,
      SegmentId.dummy(index2.toString()),
      new QueryableIndexSegment(index2, SegmentId.dummy(index2.toString()))
  );
  runners.add(tooSmallGroupByFactory.getToolchest().preMergeQueryDecoration(tooSmallRunner));
  return runners;
}
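The "too small" factory is configured with a merge buffer too small for the query, which forces the limit push-down code path under test. A sketch of how such a per-segment runner is typically wrapped before running a query; the executorService, the query variable, and the exact mergeRunners signature are assumptions (newer Druid versions take a QueryProcessingPool rather than an ExecutorService).

// Sketch: merge per-segment results and finalize them, as a broker would.
QueryToolChest<ResultRow, GroupByQuery> toolChest = tooSmallGroupByFactory.getToolchest();
QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
    toolChest.mergeResults(tooSmallGroupByFactory.mergeRunners(executorService, getRunner2())),
    (QueryToolChest) toolChest
);
Sequence<ResultRow> results = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());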
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class GroupByLimitPushDownInsufficientBufferTest, method setup:
@Before
public void setup() throws Exception {
  tmpDir = FileUtils.createTempDir();
  InputRow row;
  List<String> dimNames = Arrays.asList("dimA", "metA");
  Map<String, Object> event;

  final IncrementalIndex indexA = makeIncIndex(false);
  incrementalIndices.add(indexA);

  event = new HashMap<>();
  event.put("dimA", "hello");
  event.put("metA", 100);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "mango");
  event.put("metA", 95);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "world");
  event.put("metA", 75);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "fubaz");
  event.put("metA", 75);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "zortaxx");
  event.put("metA", 999);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "blarg");
  event.put("metA", 125);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "blerg");
  event.put("metA", 130);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  final File fileA = INDEX_MERGER_V9.persist(
      indexA,
      new File(tmpDir, "A"),
      new IndexSpec(),
      OffHeapMemorySegmentWriteOutMediumFactory.instance()
  );
  QueryableIndex qindexA = INDEX_IO.loadIndex(fileA);

  final IncrementalIndex indexB = makeIncIndex(false);
  incrementalIndices.add(indexB);

  event = new HashMap<>();
  event.put("dimA", "foo");
  event.put("metA", 200);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "world");
  event.put("metA", 75);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "mango");
  event.put("metA", 95);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "zebra");
  event.put("metA", 180);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "blorg");
  event.put("metA", 120);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  final File fileB = INDEX_MERGER_V9.persist(
      indexB,
      new File(tmpDir, "B"),
      new IndexSpec(),
      OffHeapMemorySegmentWriteOutMediumFactory.instance()
  );
  QueryableIndex qindexB = INDEX_IO.loadIndex(fileB);

  groupByIndices = Arrays.asList(qindexA, qindexB);
  resourceCloser = Closer.create();
  setupGroupByFactory();
}
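Each of the twelve rows above repeats the same four-line event-building pattern. A hypothetical helper (not in the test) that would collapse it:

// Hypothetical helper: builds the event map and adds the row in one call.
private static void addRow(IncrementalIndex index, List<String> dimNames, String dimA, int metA)
    throws IndexSizeExceededException {
  Map<String, Object> event = new HashMap<>();
  event.put("dimA", dimA);
  event.put("metA", metA);
  index.add(new MapBasedInputRow(1000, dimNames, event));
}

// Usage: addRow(indexA, dimNames, "hello", 100);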
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class GroupByLimitPushDownMultiNodeMergeTest, method getRunner1:
private List<QueryRunner<ResultRow>> getRunner1(int qIndexNumber) {
  List<QueryRunner<ResultRow>> runners = new ArrayList<>();
  QueryableIndex index = groupByIndices.get(qIndexNumber);
  QueryRunner<ResultRow> runner = makeQueryRunner(
      groupByFactory,
      SegmentId.dummy(index.toString()),
      new QueryableIndexSegment(index, SegmentId.dummy(index.toString()))
  );
  runners.add(groupByFactory.getToolchest().preMergeQueryDecoration(runner));
  return runners;
}
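Each call builds a single-runner list for one QueryableIndex; the multi-node test simulates separate historicals by combining runners built from different indices. A sketch of that combination (the wiring below is an assumption about the surrounding test, not code taken from it):

// Sketch: treat each index as a separate historical, then merge as a broker would.
List<QueryRunner<ResultRow>> perNodeRunners = new ArrayList<>();
perNodeRunners.addAll(getRunner1(0));
perNodeRunners.addAll(getRunner1(1));
QueryRunner<ResultRow> brokerLikeRunner = groupByFactory.getToolchest().mergeResults(
    groupByFactory.mergeRunners(executorService, perNodeRunners)
);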