use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
the class IncrementalIndexTest method testgetDimensions.
@Test
public void testgetDimensions()
{
  final IncrementalIndex incrementalIndex = indexCreator.createIndex(
      (builder, args) -> builder
          .setIndexSchema(
              new IncrementalIndexSchema.Builder()
                  .withMetrics(new CountAggregatorFactory("count"))
                  .withDimensionsSpec(
                      new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList("dim0", "dim1")))
                  )
                  .build()
          )
          .setMaxRowCount(1000000)
          .build()
  );
  Assert.assertEquals(Arrays.asList("dim0", "dim1"), incrementalIndex.getDimensionNames());
}
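The indexCreator fixture hides the concrete IncrementalIndex implementation. For orientation, a minimal hand-built equivalent, assuming the on-heap implementation (OnheapIncrementalIndex.Builder) that appears later on this page, would look like:

// A sketch only: builds the same schema without the indexCreator test fixture.
IncrementalIndex index = new OnheapIncrementalIndex.Builder()
    .setIndexSchema(
        new IncrementalIndexSchema.Builder()
            .withMetrics(new CountAggregatorFactory("count"))
            .withDimensionsSpec(
                new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList("dim0", "dim1")))
            )
            .build()
    )
    .setMaxRowCount(1000000)
    .build();
// getDimensionNames() returns dimensions in declaration order, hence the assertion above.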
use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
the class StringColumnAggregationTest method setup.
@Before
public void setup() throws Exception
{
  List<String> dimensions = ImmutableList.of(singleValue, multiValue);
  List<InputRow> inputRows = new ArrayList<>(n);
  for (int i = 1; i <= n; i++) {
    String val = String.valueOf(i * 1.0d);
    inputRows.add(
        new MapBasedInputRow(
            DateTime.now(DateTimeZone.UTC),
            dimensions,
            ImmutableMap.of(singleValue, val, multiValue, Lists.newArrayList(val, null, val))
        )
    );
  }
  aggregationTestHelper = AggregationTestHelper.createGroupByQueryAggregationTestHelper(
      Collections.emptyList(),
      new GroupByQueryConfig(),
      tempFolder
  );
  IncrementalIndex index = AggregationTestHelper.createIncrementalIndex(
      inputRows.iterator(),
      new NoopInputRowParser(null),
      new AggregatorFactory[]{new CountAggregatorFactory("count")},
      0,
      Granularities.NONE,
      false,
      100,
      false
  );
  this.segments = ImmutableList.of(
      new IncrementalIndexSegment(index, SegmentId.dummy("test")),
      aggregationTestHelper.persistIncrementalIndex(index, null)
  );
  // We ingested the arithmetic progression 1..n, so the expected sums follow from n * (n + 1) / 2.
  // All sums are doubled because the query runs over two segments holding the same data
  // (the in-memory index and its persisted copy).
  numRows = 2 * n;
  singleValueSum = n * (n + 1);
  multiValueSum = 2 * n * (n + 1);
  singleValueMax = n;
  multiValueMax = n;
  singleValueMin = 1;
  multiValueMin = 1;
}
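The expected values follow from the arithmetic series 1..n; a standalone check of that arithmetic (illustrative only, with a local n rather than the test's field):

// Illustrative arithmetic only, not part of the Druid test.
int n = 10;                                  // any positive n
long series = (long) n * (n + 1) / 2;        // sum of 1..n
long expectedSingleValueSum = 2 * series;    // two segments -> n * (n + 1)
long expectedMultiValueSum = 4 * series;     // value stored twice per row -> 2 * n * (n + 1)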
use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
the class AggregationTestHelper method createIndex.
public void createIndex(
    Iterator rows,
    InputRowParser parser,
    final AggregatorFactory[] metrics,
    File outDir,
    long minTimestamp,
    Granularity gran,
    boolean deserializeComplexMetrics,
    int maxRowCount,
    boolean rollup
) throws Exception
{
  IncrementalIndex index = null;
  List<File> toMerge = new ArrayList<>();
  try {
    index = new OnheapIncrementalIndex.Builder()
        .setIndexSchema(
            new IncrementalIndexSchema.Builder()
                .withMinTimestamp(minTimestamp)
                .withDimensionsSpec(parser.getParseSpec().getDimensionsSpec())
                .withQueryGranularity(gran)
                .withMetrics(metrics)
                .withRollup(rollup)
                .build()
        )
        .setDeserializeComplexMetrics(deserializeComplexMetrics)
        .setMaxRowCount(maxRowCount)
        .build();
    while (rows.hasNext()) {
      Object row = rows.next();
      if (!index.canAppendRow()) {
        // The in-memory index is full: spill it to a temp folder and start a fresh one.
        File tmp = tempFolder.newFolder();
        toMerge.add(tmp);
        indexMerger.persist(index, tmp, new IndexSpec(), null);
        index.close();
        index = new OnheapIncrementalIndex.Builder()
            .setIndexSchema(
                new IncrementalIndexSchema.Builder()
                    .withMinTimestamp(minTimestamp)
                    .withDimensionsSpec(parser.getParseSpec().getDimensionsSpec())
                    .withQueryGranularity(gran)
                    .withMetrics(metrics)
                    .withRollup(rollup)
                    .build()
            )
            .setDeserializeComplexMetrics(deserializeComplexMetrics)
            .setMaxRowCount(maxRowCount)
            .build();
      }
      if (row instanceof String && parser instanceof StringInputRowParser) {
        // Note: this is required because StringInputRowParser is InputRowParser<ByteBuffer>
        // as opposed to InputRowParser<String>.
        index.add(((StringInputRowParser) parser).parse((String) row));
      } else {
        index.add(((List<InputRow>) parser.parseBatch(row)).get(0));
      }
    }
    if (toMerge.size() > 0) {
      // At least one spill happened: persist the final partial index too,
      // then merge all persisted parts into outDir.
      File tmp = tempFolder.newFolder();
      toMerge.add(tmp);
      indexMerger.persist(index, tmp, new IndexSpec(), null);
      List<QueryableIndex> indexes = new ArrayList<>(toMerge.size());
      for (File file : toMerge) {
        indexes.add(indexIO.loadIndex(file));
      }
      indexMerger.mergeQueryableIndex(indexes, rollup, metrics, outDir, new IndexSpec(), null, -1);
      for (QueryableIndex qi : indexes) {
        qi.close();
      }
    } else {
      // Everything fit in a single in-memory index; persist it directly.
      indexMerger.persist(index, outDir, new IndexSpec(), null);
    }
  } finally {
    if (index != null) {
      index.close();
    }
  }
}
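A hypothetical call site, to make the long parameter list concrete (inputRows and outDir are placeholders here, not values from the Druid test suite):

// Sketch only: persist map-based rows into outDir via the helper above.
AggregationTestHelper helper = AggregationTestHelper.createGroupByQueryAggregationTestHelper(
    Collections.emptyList(), new GroupByQueryConfig(), tempFolder);
helper.createIndex(
    inputRows.iterator(),                                          // rows
    new NoopInputRowParser(null),                                  // parser
    new AggregatorFactory[]{new CountAggregatorFactory("count")},  // metrics
    outDir,                                                        // destination directory
    0,                                                             // minTimestamp
    Granularities.NONE,                                            // gran
    false,                                                         // deserializeComplexMetrics
    5000,                                                          // maxRowCount before spilling
    false                                                          // rollup
);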
use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
the class GroupByLimitPushDownInsufficientBufferTest method setup.
@Before
public void setup() throws Exception
{
  tmpDir = FileUtils.createTempDir();
  InputRow row;
  List<String> dimNames = Arrays.asList("dimA", "metA");
  Map<String, Object> event;

  final IncrementalIndex indexA = makeIncIndex(false);
  incrementalIndices.add(indexA);

  event = new HashMap<>();
  event.put("dimA", "hello");
  event.put("metA", 100);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "mango");
  event.put("metA", 95);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "world");
  event.put("metA", 75);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "fubaz");
  event.put("metA", 75);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "zortaxx");
  event.put("metA", 999);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "blarg");
  event.put("metA", 125);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  event = new HashMap<>();
  event.put("dimA", "blerg");
  event.put("metA", 130);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexA.add(row);

  final File fileA = INDEX_MERGER_V9.persist(
      indexA,
      new File(tmpDir, "A"),
      new IndexSpec(),
      OffHeapMemorySegmentWriteOutMediumFactory.instance()
  );
  QueryableIndex qindexA = INDEX_IO.loadIndex(fileA);

  final IncrementalIndex indexB = makeIncIndex(false);
  incrementalIndices.add(indexB);

  event = new HashMap<>();
  event.put("dimA", "foo");
  event.put("metA", 200);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "world");
  event.put("metA", 75);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "mango");
  event.put("metA", 95);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "zebra");
  event.put("metA", 180);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  event = new HashMap<>();
  event.put("dimA", "blorg");
  event.put("metA", 120);
  row = new MapBasedInputRow(1000, dimNames, event);
  indexB.add(row);

  final File fileB = INDEX_MERGER_V9.persist(
      indexB,
      new File(tmpDir, "B"),
      new IndexSpec(),
      OffHeapMemorySegmentWriteOutMediumFactory.instance()
  );
  QueryableIndex qindexB = INDEX_IO.loadIndex(fileB);

  groupByIndices = Arrays.asList(qindexA, qindexB);
  resourceCloser = Closer.create();
  setupGroupByFactory();
}
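The per-row boilerplate above could be folded into a small helper; a sketch (addRow is our name, not Druid's):

// Hypothetical helper, not part of the Druid test: adds one (dimA, metA) row.
private static void addRow(IncrementalIndex index, List<String> dimNames, String dimA, int metA)
    throws Exception
{
  Map<String, Object> event = new HashMap<>();
  event.put("dimA", dimA);
  event.put("metA", metA);
  index.add(new MapBasedInputRow(1000, dimNames, event));
}
// Usage: addRow(indexA, dimNames, "hello", 100);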
use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
the class IndexMergerV9WithSpatialIndexTest method makeQueryableIndex.
private static QueryableIndex makeQueryableIndex(IndexSpec indexSpec, IndexMergerV9 indexMergerV9, IndexIO indexIO)
    throws IOException
{
  IncrementalIndex theIndex = makeIncrementalIndex();
  // Turn the temp file into an empty temp directory to hold the persisted segment.
  File tmpFile = File.createTempFile("billy", "yay");
  tmpFile.delete();
  FileUtils.mkdirp(tmpFile);
  try {
    indexMergerV9.persist(theIndex, tmpFile, indexSpec, null);
    return indexIO.loadIndex(tmpFile);
  } finally {
    FileUtils.deleteDirectory(tmpFile);
  }
}
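For callers, the persist-then-load round-trip yields a QueryableIndex that should be closed after use; a sketch:

// Sketch only: query the index, then release its resources.
QueryableIndex qIndex = makeQueryableIndex(new IndexSpec(), indexMergerV9, indexIO);
try {
  // ... run queries against qIndex ...
} finally {
  qIndex.close();
}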