
Example 31 with IndexSpec

Use of io.druid.segment.IndexSpec in project druid by druid-io.

From the class IngestSegmentFirehoseTest, method createTestIndex.

private void createTestIndex(File segmentDir) throws Exception {
    List<String> rows = Lists.newArrayList(
        "2014102200,host1,10", "2014102200,host2,20", "2014102200,host3,30",
        "2014102201,host1,10", "2014102201,host2,20", "2014102201,host3,30",
        "2014102202,host1,10", "2014102202,host2,20", "2014102202,host3,30");
    StringInputRowParser parser = new StringInputRowParser(
        new CSVParseSpec(
            new TimestampSpec("timestamp", "yyyyMMddHH", null),
            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("host")), null, null),
            null,
            ImmutableList.of("timestamp", "host", "visited")),
        Charsets.UTF_8.toString());
    AggregatorFactory[] aggregators = new AggregatorFactory[] { new LongSumAggregatorFactory("visited_sum", "visited") };
    IncrementalIndex index = null;
    try {
        index = new OnheapIncrementalIndex(0, Granularities.NONE, aggregators, true, true, true, 5000);
        for (String line : rows) {
            index.add(parser.parse(line));
        }
        indexMerger.persist(index, segmentDir, new IndexSpec());
    } finally {
        if (index != null) {
            index.close();
        }
    }
}
Also used : IndexSpec(io.druid.segment.IndexSpec) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) CSVParseSpec(io.druid.data.input.impl.CSVParseSpec) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) TimestampSpec(io.druid.data.input.impl.TimestampSpec) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory)
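
The persist call above writes the segment with a default IndexSpec. As a hedged variation, not part of the original test, the same call should also accept a customized spec; the constructor and argument values below are copied from Example 35, and indexMerger, index, and segmentDir are the names already in scope in this method:

// A minimal sketch: persist with an explicit IndexSpec instead of the defaults.
// Constructor arguments mirror the ones used in Example 35 below.
IndexSpec customSpec = new IndexSpec(
    new RoaringBitmapSerdeFactory(null),                       // bitmap index encoding
    CompressedObjectStrategy.CompressionStrategy.LZF,          // dimension compression
    CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED, // metric compression
    CompressionFactory.LongEncodingStrategy.LONGS);            // long-column encoding
indexMerger.persist(index, segmentDir, customSpec);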

Example 32 with IndexSpec

Use of io.druid.segment.IndexSpec in project druid by druid-io.

From the class GroupByTypeInterfaceBenchmark, method setup.

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT %d", System.currentTimeMillis());
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    executorService = Execs.multiThreaded(numProcessingThreads, "GroupByThreadPool[%d]");
    setupQueries();
    String schemaName = "basic";
    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    stringQuery = SCHEMA_QUERY_MAP.get(schemaName).get("string");
    longFloatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("longFloat");
    longQuery = SCHEMA_QUERY_MAP.get(schemaName).get("long");
    floatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("float");
    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);
    tmpDir = Files.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());
    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);
    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);
        final IncrementalIndex index = makeIncIndex();
        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }
        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment, rowsPerSegment * numSegments, i + 1, numSegments);
        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec());
        queryableIndexes.add(INDEX_IO.loadIndex(file));
        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }
    StupidPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool", new OffheapBufferGenerator("compute", 250_000_000), 0, Integer.MAX_VALUE);
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new BlockingPool<>(new OffheapBufferGenerator("merge", 250_000_000), 2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {

        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);
    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {

        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };
    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
        configSupplier,
        new GroupByStrategyV1(
            configSupplier,
            new GroupByQueryEngine(configSupplier, bufferPool),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER,
            bufferPool),
        new GroupByStrategyV2(
            druidProcessingConfig,
            configSupplier,
            bufferPool,
            mergePool,
            new ObjectMapper(new SmileFactory()),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER));
    factory = new GroupByQueryRunnerFactory(
        strategySelector,
        new GroupByQueryQueryToolChest(strategySelector, QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()));
}
Also used : GroupByStrategySelector(io.druid.query.groupby.strategy.GroupByStrategySelector) IndexSpec(io.druid.segment.IndexSpec) BenchmarkDataGenerator(io.druid.benchmark.datagen.BenchmarkDataGenerator) HyperUniquesSerde(io.druid.query.aggregation.hyperloglog.HyperUniquesSerde) GroupByQueryQueryToolChest(io.druid.query.groupby.GroupByQueryQueryToolChest) GroupByStrategyV1(io.druid.query.groupby.strategy.GroupByStrategyV1) GroupByStrategyV2(io.druid.query.groupby.strategy.GroupByStrategyV2) GroupByQueryEngine(io.druid.query.groupby.GroupByQueryEngine) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryRunnerFactory(io.druid.query.groupby.GroupByQueryRunnerFactory) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) ByteBuffer(java.nio.ByteBuffer) SmileFactory(com.fasterxml.jackson.dataformat.smile.SmileFactory) OffheapBufferGenerator(io.druid.offheap.OffheapBufferGenerator) InputRow(io.druid.data.input.InputRow) BlockingPool(io.druid.collections.BlockingPool) StupidPool(io.druid.collections.StupidPool) DruidProcessingConfig(io.druid.query.DruidProcessingConfig) File(java.io.File) Setup(org.openjdk.jmh.annotations.Setup)
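
The setup above deliberately leaves tmpDir, every loaded QueryableIndex, and the last incremental index open for the benchmark body. A hedged sketch of a matching JMH tear-down follows; the commons-io FileUtils call is an assumption, since that class does not appear in the imports listed above:

// A sketch of a matching tear-down (org.openjdk.jmh.annotations.TearDown),
// not shown in the original benchmark excerpt.
@TearDown(Level.Trial)
public void tearDown() throws IOException {
    for (QueryableIndex index : queryableIndexes) {
        // release each memory-mapped on-disk segment
        index.close();
    }
    if (anIncrementalIndex != null) {
        anIncrementalIndex.close();
    }
    // Assumes org.apache.commons.io.FileUtils is on the classpath.
    FileUtils.deleteDirectory(tmpDir);
}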

Example 33 with IndexSpec

Use of io.druid.segment.IndexSpec in project druid by druid-io.

From the class FilterPartitionBenchmark, method setup.

@Setup
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    incIndex = makeIncIndex();
    for (int j = 0; j < rowsPerSegment; j++) {
        InputRow row = gen.nextRow();
        if (j % 10000 == 0) {
            log.info(j + " rows generated.");
        }
        incIndex.add(row);
    }
    tmpDir = Files.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
    qIndex = INDEX_IO.loadIndex(indexFile);
    Interval interval = schemaInfo.getDataInterval();
    timeFilterNone = new BoundFilter(new BoundDimFilter(
        Column.TIME_COLUMN_NAME,
        String.valueOf(Long.MAX_VALUE), String.valueOf(Long.MAX_VALUE),
        true, true, null, null,
        StringComparators.ALPHANUMERIC));
    long halfEnd = (interval.getEndMillis() + interval.getStartMillis()) / 2;
    timeFilterHalf = new BoundFilter(new BoundDimFilter(
        Column.TIME_COLUMN_NAME,
        String.valueOf(interval.getStartMillis()), String.valueOf(halfEnd),
        true, true, null, null,
        StringComparators.ALPHANUMERIC));
    timeFilterAll = new BoundFilter(new BoundDimFilter(
        Column.TIME_COLUMN_NAME,
        String.valueOf(interval.getStartMillis()), String.valueOf(interval.getEndMillis()),
        true, true, null, null,
        StringComparators.ALPHANUMERIC));
}
Also used : IndexSpec(io.druid.segment.IndexSpec) BoundDimFilter(io.druid.query.filter.BoundDimFilter) BoundFilter(io.druid.segment.filter.BoundFilter) BenchmarkDataGenerator(io.druid.benchmark.datagen.BenchmarkDataGenerator) InputRow(io.druid.data.input.InputRow) HyperUniquesSerde(io.druid.query.aggregation.hyperloglog.HyperUniquesSerde) Interval(org.joda.time.Interval) Setup(org.openjdk.jmh.annotations.Setup)
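
All three filters share one pattern: strict lower and upper bounds on the __time column, compared with StringComparators.ALPHANUMERIC so the millisecond strings order numerically. timeFilterNone pins both bounds to Long.MAX_VALUE and matches no rows, timeFilterHalf covers the first half of the data interval, and timeFilterAll covers all of it. A hedged sketch of a hypothetical fourth filter built the same way, covering the first quarter of the interval:

// Hypothetical, not part of the benchmark: the same BoundDimFilter pattern,
// bounded at one quarter of the data interval.
long quarterEnd = interval.getStartMillis() + (interval.getEndMillis() - interval.getStartMillis()) / 4;
Filter timeFilterQuarter = new BoundFilter(new BoundDimFilter(
    Column.TIME_COLUMN_NAME,
    String.valueOf(interval.getStartMillis()), String.valueOf(quarterEnd),
    true, true, null, null,
    StringComparators.ALPHANUMERIC));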

Example 34 with IndexSpec

Use of io.druid.segment.IndexSpec in project druid by druid-io.

From the class FilteredAggregatorBenchmark, method setup.

@Setup
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    incIndex = makeIncIndex(schemaInfo.getAggsArray());
    filter = new OrDimFilter(Arrays.asList(
        new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
        new JavaScriptDimFilter("dimSequential", "function(x) { return false }", null, JavaScriptConfig.getEnabledInstance()),
        new RegexDimFilter("dimSequential", "X", null),
        new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
        new InDimFilter("dimSequential", Arrays.asList("X"), null)));
    filteredMetrics = new AggregatorFactory[1];
    filteredMetrics[0] = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
    incIndexFilteredAgg = makeIncIndex(filteredMetrics);
    inputRows = new ArrayList<>();
    for (int j = 0; j < rowsPerSegment; j++) {
        InputRow row = gen.nextRow();
        if (j % 10000 == 0) {
            log.info(j + " rows generated.");
        }
        incIndex.add(row);
        inputRows.add(row);
    }
    tmpDir = Files.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
    qIndex = INDEX_IO.loadIndex(indexFile);
    factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()), new TimeseriesQueryEngine(), QueryBenchmarkUtil.NOOP_QUERYWATCHER);
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
    List<AggregatorFactory> queryAggs = new ArrayList<>();
    queryAggs.add(filteredMetrics[0]);
    query = Druids.newTimeseriesQueryBuilder()
        .dataSource("blah")
        .granularity(Granularities.ALL)
        .intervals(intervalSpec)
        .aggregators(queryAggs)
        .descending(false)
        .build();
}
Also used : FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) RegexDimFilter(io.druid.query.filter.RegexDimFilter) IndexSpec(io.druid.segment.IndexSpec) BoundDimFilter(io.druid.query.filter.BoundDimFilter) ContainsSearchQuerySpec(io.druid.query.search.search.ContainsSearchQuerySpec) BenchmarkDataGenerator(io.druid.benchmark.datagen.BenchmarkDataGenerator) ArrayList(java.util.ArrayList) HyperUniquesSerde(io.druid.query.aggregation.hyperloglog.HyperUniquesSerde) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) TimeseriesQueryQueryToolChest(io.druid.query.timeseries.TimeseriesQueryQueryToolChest) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) TimeseriesQueryEngine(io.druid.query.timeseries.TimeseriesQueryEngine) TimeseriesQueryRunnerFactory(io.druid.query.timeseries.TimeseriesQueryRunnerFactory) BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) OrDimFilter(io.druid.query.filter.OrDimFilter) InDimFilter(io.druid.query.filter.InDimFilter) InputRow(io.druid.data.input.InputRow) SearchQueryDimFilter(io.druid.query.filter.SearchQueryDimFilter) JavaScriptDimFilter(io.druid.query.filter.JavaScriptDimFilter) QuerySegmentSpec(io.druid.query.spec.QuerySegmentSpec) Setup(org.openjdk.jmh.annotations.Setup)
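
Each sub-filter in the OrDimFilter above appears deliberately unsatisfiable (an empty strict bound, a JavaScript predicate that always returns false, and patterns that never match), so the filtered "rows" count stays at zero and the benchmark isolates filter-evaluation overhead. A hedged sketch of the same FilteredAggregatorFactory pattern with a filter that can match; the regex value is hypothetical:

// Hypothetical variation: count only rows whose dimSequential value starts with "1".
AggregatorFactory filteredRows = new FilteredAggregatorFactory(
    new CountAggregatorFactory("matchingRows"),
    new RegexDimFilter("dimSequential", "^1.*", null));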

Example 35 with IndexSpec

Use of io.druid.segment.IndexSpec in project druid by druid-io.

From the class TaskSerdeTest, method testSegmentConvertSerde.

@Test
public void testSegmentConvertSerde() throws IOException {
    final DataSegment segment = new DataSegment(
        "dataSource",
        Interval.parse("1990-01-01/1999-12-31"),
        "version",
        ImmutableMap.<String, Object>of(),
        ImmutableList.of("dim1", "dim2"),
        ImmutableList.of("metric1", "metric2"),
        NoneShardSpec.instance(),
        0,
        12345L);
    final ConvertSegmentTask convertSegmentTaskOriginal = ConvertSegmentTask.create(
        segment,
        new IndexSpec(
            new RoaringBitmapSerdeFactory(null),
            CompressedObjectStrategy.CompressionStrategy.LZF,
            CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
            CompressionFactory.LongEncodingStrategy.LONGS),
        false,
        true,
        null);
    final String json = jsonMapper.writeValueAsString(convertSegmentTaskOriginal);
    final Task task = jsonMapper.readValue(json, Task.class);
    Assert.assertTrue(task instanceof ConvertSegmentTask);
    final ConvertSegmentTask convertSegmentTask = (ConvertSegmentTask) task;
    Assert.assertEquals(convertSegmentTaskOriginal.getDataSource(), convertSegmentTask.getDataSource());
    Assert.assertEquals(convertSegmentTaskOriginal.getInterval(), convertSegmentTask.getInterval());
    Assert.assertEquals(
        convertSegmentTaskOriginal.getIndexSpec().getBitmapSerdeFactory().getClass().getCanonicalName(),
        convertSegmentTask.getIndexSpec().getBitmapSerdeFactory().getClass().getCanonicalName());
    Assert.assertEquals(
        convertSegmentTaskOriginal.getIndexSpec().getDimensionCompression(),
        convertSegmentTask.getIndexSpec().getDimensionCompression());
    Assert.assertEquals(
        convertSegmentTaskOriginal.getIndexSpec().getMetricCompression(),
        convertSegmentTask.getIndexSpec().getMetricCompression());
    Assert.assertEquals(false, convertSegmentTask.isForce());
    Assert.assertEquals(segment, convertSegmentTask.getSegment());
}
Also used : IndexSpec(io.druid.segment.IndexSpec) RoaringBitmapSerdeFactory(io.druid.segment.data.RoaringBitmapSerdeFactory) DataSegment(io.druid.timeline.DataSegment) Test(org.junit.Test)
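
Since ConvertSegmentTask carries its IndexSpec through the JSON round trip intact, the spec itself must be Jackson-serializable. A hedged sketch of round-tripping an IndexSpec directly, reusing jsonMapper and the getters asserted in the test above:

// A minimal sketch, assuming jsonMapper is the same Druid-configured ObjectMapper.
IndexSpec original = new IndexSpec(
    new RoaringBitmapSerdeFactory(null),
    CompressedObjectStrategy.CompressionStrategy.LZF,
    CompressedObjectStrategy.CompressionStrategy.UNCOMPRESSED,
    CompressionFactory.LongEncodingStrategy.LONGS);
String specJson = jsonMapper.writeValueAsString(original);
IndexSpec roundTripped = jsonMapper.readValue(specJson, IndexSpec.class);
Assert.assertEquals(original.getDimensionCompression(), roundTripped.getDimensionCompression());
Assert.assertEquals(original.getMetricCompression(), roundTripped.getMetricCompression());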

Aggregations

IndexSpec (io.druid.segment.IndexSpec): 37 uses
File (java.io.File): 21 uses
OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex): 15 uses
IncrementalIndex (io.druid.segment.incremental.IncrementalIndex): 13 uses
InputRow (io.druid.data.input.InputRow): 11 uses
Test (org.junit.Test): 11 uses
BenchmarkDataGenerator (io.druid.benchmark.datagen.BenchmarkDataGenerator): 10 uses
HyperUniquesSerde (io.druid.query.aggregation.hyperloglog.HyperUniquesSerde): 10 uses
QueryableIndex (io.druid.segment.QueryableIndex): 10 uses
Setup (org.openjdk.jmh.annotations.Setup): 10 uses
Period (org.joda.time.Period): 7 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 6 uses
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 5 uses
StupidPool (io.druid.collections.StupidPool): 4 uses
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 4 uses
OffheapBufferGenerator (io.druid.offheap.OffheapBufferGenerator): 4 uses
QueryableIndexSegment (io.druid.segment.QueryableIndexSegment): 4 uses
RoaringBitmapSerdeFactory (io.druid.segment.data.RoaringBitmapSerdeFactory): 4 uses
DataSegment (io.druid.timeline.DataSegment): 4 uses
IOException (java.io.IOException): 4 uses