use of io.druid.segment.QueryableIndex in project druid by druid-io.
the class BaseFilterTest method makeConstructors.
public static Collection<Object[]> makeConstructors()
{
  final List<Object[]> constructors = Lists.newArrayList();

  final Map<String, BitmapSerdeFactory> bitmapSerdeFactories = ImmutableMap.<String, BitmapSerdeFactory>of(
      "concise", new ConciseBitmapSerdeFactory(),
      "roaring", new RoaringBitmapSerdeFactory(true)
  );

  final Map<String, IndexMerger> indexMergers = ImmutableMap.<String, IndexMerger>of(
      "IndexMerger", TestHelper.getTestIndexMerger(),
      "IndexMergerV9", TestHelper.getTestIndexMergerV9()
  );

  final Map<String, Function<IndexBuilder, Pair<StorageAdapter, Closeable>>> finishers = ImmutableMap.of(
      "incremental", new Function<IndexBuilder, Pair<StorageAdapter, Closeable>>()
      {
        @Override
        public Pair<StorageAdapter, Closeable> apply(IndexBuilder input)
        {
          final IncrementalIndex index = input.buildIncrementalIndex();
          return Pair.<StorageAdapter, Closeable>of(
              new IncrementalIndexStorageAdapter(index),
              new Closeable()
              {
                @Override
                public void close() throws IOException
                {
                  index.close();
                }
              }
          );
        }
      },
      "mmapped", new Function<IndexBuilder, Pair<StorageAdapter, Closeable>>()
      {
        @Override
        public Pair<StorageAdapter, Closeable> apply(IndexBuilder input)
        {
          final QueryableIndex index = input.buildMMappedIndex();
          return Pair.<StorageAdapter, Closeable>of(
              new QueryableIndexStorageAdapter(index),
              new Closeable()
              {
                @Override
                public void close() throws IOException
                {
                  index.close();
                }
              }
          );
        }
      },
      "mmappedMerged", new Function<IndexBuilder, Pair<StorageAdapter, Closeable>>()
      {
        @Override
        public Pair<StorageAdapter, Closeable> apply(IndexBuilder input)
        {
          final QueryableIndex index = input.buildMMappedMergedIndex();
          return Pair.<StorageAdapter, Closeable>of(
              new QueryableIndexStorageAdapter(index),
              new Closeable()
              {
                @Override
                public void close() throws IOException
                {
                  index.close();
                }
              }
          );
        }
      }
  );

  for (Map.Entry<String, BitmapSerdeFactory> bitmapSerdeFactoryEntry : bitmapSerdeFactories.entrySet()) {
    for (Map.Entry<String, IndexMerger> indexMergerEntry : indexMergers.entrySet()) {
      for (Map.Entry<String, Function<IndexBuilder, Pair<StorageAdapter, Closeable>>> finisherEntry : finishers.entrySet()) {
        for (boolean cnf : ImmutableList.of(false, true)) {
          for (boolean optimize : ImmutableList.of(false, true)) {
            final String testName = String.format(
                "bitmaps[%s], indexMerger[%s], finisher[%s], cnf[%s], optimize[%s]",
                bitmapSerdeFactoryEntry.getKey(),
                indexMergerEntry.getKey(),
                finisherEntry.getKey(),
                cnf,
                optimize
            );
            final IndexBuilder indexBuilder = IndexBuilder
                .create()
                .indexSpec(new IndexSpec(bitmapSerdeFactoryEntry.getValue(), null, null, null))
                .indexMerger(indexMergerEntry.getValue());
            constructors.add(new Object[]{testName, indexBuilder, finisherEntry.getValue(), cnf, optimize});
          }
        }
      }
    }
  }

  return constructors;
}
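These Object[] rows are intended for JUnit's Parameterized runner: a concrete filter test subclasses BaseFilterTest and exposes them through @Parameterized.Parameters. A minimal sketch of such a consumer, assuming a hypothetical subclass ExampleFilterTest and a ROWS fixture list (both illustrative, not from the Druid source):

@RunWith(Parameterized.class)
public class ExampleFilterTest extends BaseFilterTest
{
  // JUnit instantiates the test once per row of makeConstructors(); "{0}" picks up testName.
  @Parameterized.Parameters(name = "{0}")
  public static Collection<Object[]> constructorFeeder()
  {
    return makeConstructors();
  }

  public ExampleFilterTest(
      String testName,
      IndexBuilder indexBuilder,
      Function<IndexBuilder, Pair<StorageAdapter, Closeable>> finisher,
      boolean cnf,
      boolean optimize
  )
  {
    // ROWS: the test's input-row fixture (hypothetical here).
    super(testName, ROWS, indexBuilder, finisher, cnf, optimize);
  }
}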
use of io.druid.segment.QueryableIndex in project druid by druid-io.
the class IndexMergeBenchmark method setup.
@Setup
public void setup() throws IOException
{
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }

  indexesToMerge = new ArrayList<>();
  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);

  for (int i = 0; i < numSegments; i++) {
    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
        schemaInfo.getColumnSchemas(),
        RNG_SEED + i,
        schemaInfo.getDataInterval(),
        rowsPerSegment
    );
    IncrementalIndex incIndex = makeIncIndex();
    for (int j = 0; j < rowsPerSegment; j++) {
      InputRow row = gen.nextRow();
      if (j % 10000 == 0) {
        log.info(j + " rows generated.");
      }
      incIndex.add(row);
    }
    tmpDir = Files.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    File indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
    QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
    indexesToMerge.add(qIndex);
  }
}
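The QueryableIndex list built here is what the benchmark's merge methods exercise. A sketch of a JMH method in the spirit of this class's V9 merge, assuming schemaInfo.getAggsArray() supplies the metric aggregators and hard-coding rollup to true; the temp-file handling is illustrative:

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public void mergeV9(Blackhole blackhole) throws Exception
{
  File tmpFile = File.createTempFile("IndexMergeBenchmark-MERGEDFILE-V9-" + System.currentTimeMillis(), ".TEMPFILE");
  tmpFile.delete();
  tmpFile.mkdirs();
  try {
    // Merge the per-segment QueryableIndexes built in setup() into one segment on disk.
    File mergedFile = INDEX_MERGER_V9.mergeQueryableIndex(
        indexesToMerge,
        true, // rollup
        schemaInfo.getAggsArray(),
        tmpFile,
        new IndexSpec()
    );
    blackhole.consume(mergedFile);
  } finally {
    tmpFile.delete();
  }
}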
use of io.druid.segment.QueryableIndex in project druid by druid-io.
the class SelectBenchmark method setup.
@Setup
public void setup() throws IOException
{
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }

  executorService = Execs.multiThreaded(numSegments, "SelectThreadPool");

  setupQueries();

  String[] schemaQuery = schemaAndQuery.split("\\.");
  String schemaName = schemaQuery[0];
  String queryName = schemaQuery[1];

  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
  queryBuilder = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
  queryBuilder.pagingSpec(PagingSpec.newSpec(pagingThreshold));
  query = queryBuilder.build();

  incIndexes = new ArrayList<>();
  for (int i = 0; i < numSegments; i++) {
    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
        schemaInfo.getColumnSchemas(),
        RNG_SEED + i,
        schemaInfo.getDataInterval(),
        rowsPerSegment
    );
    IncrementalIndex incIndex = makeIncIndex();
    for (int j = 0; j < rowsPerSegment; j++) {
      InputRow row = gen.nextRow();
      if (j % 10000 == 0) {
        log.info(j + " rows generated.");
      }
      incIndex.add(row);
    }
    incIndexes.add(incIndex);
  }

  tmpDir = Files.createTempDir();
  log.info("Using temp dir: " + tmpDir.getAbsolutePath());

  qIndexes = new ArrayList<>();
  for (int i = 0; i < numSegments; i++) {
    File indexFile = INDEX_MERGER_V9.persist(incIndexes.get(i), tmpDir, new IndexSpec());
    QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
    qIndexes.add(qIndex);
  }

  final Supplier<SelectQueryConfig> selectConfigSupplier = Suppliers.ofInstance(new SelectQueryConfig(true));
  factory = new SelectQueryRunnerFactory(
      new SelectQueryQueryToolChest(
          JSON_MAPPER,
          QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator(),
          selectConfigSupplier
      ),
      new SelectQueryEngine(selectConfigSupplier),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
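With the factory assembled, draining one segment looks roughly like the sketch below: wrap a QueryableIndex in a QueryableIndexSegment, build a runner via QueryBenchmarkUtil.makeQueryRunner, and materialize the result sequence. The segment id string is illustrative:

// Run the select query against the first mmapped segment and collect the results.
String segmentId = "qIndex0";
QueryRunner<Result<SelectResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
    factory,
    segmentId,
    new QueryableIndexSegment(segmentId, qIndexes.get(0))
);
Sequence<Result<SelectResultValue>> queryResult = runner.run(query, Maps.<String, Object>newHashMap());
List<Result<SelectResultValue>> results = Sequences.toList(
    queryResult,
    Lists.<Result<SelectResultValue>>newArrayList()
);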
use of io.druid.segment.QueryableIndex in project druid by druid-io.
the class Sink method makeNewCurrIndex.
private FireHydrant makeNewCurrIndex(long minTimestamp, DataSchema schema)
{
  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withMinTimestamp(minTimestamp)
      .withTimestampSpec(schema.getParser())
      .withQueryGranularity(schema.getGranularitySpec().getQueryGranularity())
      .withDimensionsSpec(schema.getParser())
      .withMetrics(schema.getAggregators())
      .withRollup(schema.getGranularitySpec().isRollup())
      .build();
  final IncrementalIndex newIndex = new OnheapIncrementalIndex(indexSchema, reportParseExceptions, maxRowsInMemory);

  final FireHydrant old;
  synchronized (hydrantLock) {
    if (writable) {
      old = currHydrant;
      int newCount = 0;
      int numHydrants = hydrants.size();
      if (numHydrants > 0) {
        FireHydrant lastHydrant = hydrants.get(numHydrants - 1);
        newCount = lastHydrant.getCount() + 1;
        if (!indexSchema.getDimensionsSpec().hasCustomDimensions()) {
          Map<String, ColumnCapabilitiesImpl> oldCapabilities;
          if (lastHydrant.hasSwapped()) {
            oldCapabilities = Maps.newHashMap();
            QueryableIndex oldIndex = lastHydrant.getSegment().asQueryableIndex();
            for (String dim : oldIndex.getAvailableDimensions()) {
              dimOrder.add(dim);
              oldCapabilities.put(dim, (ColumnCapabilitiesImpl) oldIndex.getColumn(dim).getCapabilities());
            }
          } else {
            IncrementalIndex oldIndex = lastHydrant.getIndex();
            dimOrder.addAll(oldIndex.getDimensionOrder());
            oldCapabilities = oldIndex.getColumnCapabilities();
          }
          newIndex.loadDimensionIterable(dimOrder, oldCapabilities);
        }
      }
      currHydrant = new FireHydrant(newIndex, newCount, getSegment().getIdentifier());
      if (old != null) {
        numRowsExcludingCurrIndex.addAndGet(old.getIndex().size());
      }
      hydrants.add(currHydrant);
    } else {
      // Oops, someone called finishWriting while we were making this new index.
      newIndex.close();
      throw new ISE("finishWriting() called during swap");
    }
  }

  return old;
}
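For context, makeNewCurrIndex is the private half of hydrant rotation: the public swap() on the same class essentially just delegates to it, handing back the retired hydrant so the caller can persist it:

// Rotate in a fresh in-memory index; the returned hydrant is the old one,
// now ready to be persisted to disk.
public FireHydrant swap()
{
  return makeNewCurrIndex(interval.getStartMillis(), schema);
}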
use of io.druid.segment.QueryableIndex in project druid by druid-io.
the class QuantileSqlAggregatorTest method setUp.
@Before
public void setUp() throws Exception
{
  Calcites.setSystemProperties();

  // Note: this is needed in order to properly register the serde for Histogram.
  new ApproximateHistogramDruidModule().configure(null);

  final QueryableIndex index = IndexBuilder
      .create()
      .tmpDir(temporaryFolder.newFolder())
      .indexMerger(TestHelper.getTestIndexMergerV9())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new AggregatorFactory[]{
                      new CountAggregatorFactory("cnt"),
                      new DoubleSumAggregatorFactory("m1", "m1"),
                      new ApproximateHistogramAggregatorFactory("hist_m1", "m1", null, null, null, null)
                  }
              )
              .withRollup(false)
              .build()
      )
      .rows(CalciteTests.ROWS1)
      .buildMMappedIndex();

  walker = new SpecificSegmentsQuerySegmentWalker(CalciteTests.queryRunnerFactoryConglomerate()).add(
      DataSegment.builder()
                 .dataSource(DATA_SOURCE)
                 .interval(index.getDataInterval())
                 .version("1")
                 .shardSpec(new LinearShardSpec(0))
                 .build(),
      index
  );

  final PlannerConfig plannerConfig = new PlannerConfig();
  final SchemaPlus rootSchema = Calcites.createRootSchema(CalciteTests.createMockSchema(walker, plannerConfig));
  final DruidOperatorTable operatorTable = new DruidOperatorTable(
      ImmutableSet.<SqlAggregator>of(new QuantileSqlAggregator()),
      ImmutableSet.<SqlExtractionOperator>of()
  );
  plannerFactory = new PlannerFactory(rootSchema, walker, operatorTable, plannerConfig);
}
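The plannerFactory built here is what the test methods then use to plan and run SQL. A minimal sketch, assuming this era's DruidPlanner/PlannerResult API and an illustrative query against the test datasource (APPROX_QUANTILE is the SQL function QuantileSqlAggregator registers):

try (final DruidPlanner planner = plannerFactory.createPlanner()) {
  // Plan the SQL and execute it; each Object[] is one result row.
  final PlannerResult plannerResult = planner.plan(
      "SELECT APPROX_QUANTILE(m1, 0.9) FROM foo" // "foo" stands in for the test datasource
  );
  final List<Object[]> results = Sequences.toList(
      plannerResult.run(),
      Lists.<Object[]>newArrayList()
  );
}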