
Example 26 with TimeseriesResultValue

use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.

the class TimeseriesBenchmark method queryMultiQueryableIndex.

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndex(Blackhole blackhole, QueryableIndexState state) {
    List<QueryRunner<Result<TimeseriesResultValue>>> singleSegmentRunners = new ArrayList<>();
    QueryToolChest toolChest = factory.getToolchest();
    for (int i = 0; i < state.numSegments; i++) {
        SegmentId segmentId = SegmentId.dummy("qIndex " + i);
        QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory, segmentId, new QueryableIndexSegment(state.qIndexes.get(i), segmentId));
        singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
    }
    QueryRunner theRunner = toolChest.postMergeQueryDecoration(
        new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(factory.mergeRunners(state.executorService, singleSegmentRunners)),
            toolChest));
    Sequence<Result<TimeseriesResultValue>> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
    List<Result<TimeseriesResultValue>> results = queryResult.toList();
    blackhole.consume(results);
}
Also used : QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) SegmentId(org.apache.druid.timeline.SegmentId) ArrayList(java.util.ArrayList) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) QueryToolChest(org.apache.druid.query.QueryToolChest) QueryRunner(org.apache.druid.query.QueryRunner) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) Result(org.apache.druid.query.Result) BenchmarkMode(org.openjdk.jmh.annotations.BenchmarkMode) Benchmark(org.openjdk.jmh.annotations.Benchmark) OutputTimeUnit(org.openjdk.jmh.annotations.OutputTimeUnit)
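
A note on this benchmark: the `query` field it runs is not visible in this snippet; it is prepared during the benchmark's setup. As a purely illustrative sketch (the datasource, interval, and aggregator names below are placeholders, not the benchmark's actual values), such a timeseries query could be assembled with the same builder API used in the later examples:

TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        // hypothetical datasource name, not taken from the benchmark schema
        .dataSource("benchmark_datasource")
        .granularity(Granularities.ALL)
        // interval given as a string, as in Example 30 below
        .intervals("2000/2030")
        // placeholder long-sum aggregator over an assumed column
        .aggregators(Collections.singletonList(new LongSumAggregatorFactory("sumLong", "sumLong")))
        .build();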

Example 27 with TimeseriesResultValue

use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.

the class TimeCompareBenchmark method setup.

@Setup
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    executorService = Execs.multiThreaded(numSegments, "TopNThreadPool");
    setupQueries();
    String schemaName = "basic";
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    segmentIntervals = new Interval[numSegments];
    long startMillis = schemaInfo.getDataInterval().getStartMillis();
    long endMillis = schemaInfo.getDataInterval().getEndMillis();
    long partialIntervalMillis = (endMillis - startMillis) / numSegments;
    for (int i = 0; i < numSegments; i++) {
        long partialEndMillis = startMillis + partialIntervalMillis;
        segmentIntervals[i] = Intervals.utc(startMillis, partialEndMillis);
        log.info("Segment [%d] with interval [%s]", i, segmentIntervals[i]);
        startMillis = partialEndMillis;
    }
    incIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment " + i);
        DataGenerator gen = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + i, segmentIntervals[i], rowsPerSegment);
        IncrementalIndex incIndex = makeIncIndex();
        for (int j = 0; j < rowsPerSegment; j++) {
            InputRow row = gen.nextRow();
            if (j % 10000 == 0) {
                log.info(j + " rows generated.");
            }
            incIndex.add(row);
        }
        incIndexes.add(incIndex);
    }
    tmpDir = FileUtils.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    qIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
        File indexFile = INDEX_MERGER_V9.persist(incIndexes.get(i), tmpDir, new IndexSpec(), null);
        QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
        qIndexes.add(qIndex);
    }
    List<QueryRunner<Result<TopNResultValue>>> singleSegmentRunners = new ArrayList<>();
    QueryToolChest toolChest = topNFactory.getToolchest();
    for (int i = 0; i < numSegments; i++) {
        SegmentId segmentId = SegmentId.dummy("qIndex " + i);
        QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(topNFactory, segmentId, new QueryableIndexSegment(qIndexes.get(i), segmentId));
        singleSegmentRunners.add(new PerSegmentOptimizingQueryRunner<>(
            toolChest.preMergeQueryDecoration(runner),
            new PerSegmentQueryOptimizationContext(new SegmentDescriptor(segmentIntervals[i], "1", 0))));
    }
    topNRunner = toolChest.postMergeQueryDecoration(
        new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(topNFactory.mergeRunners(executorService, singleSegmentRunners)),
            toolChest));
    List<QueryRunner<Result<TimeseriesResultValue>>> singleSegmentRunnersT = new ArrayList<>();
    QueryToolChest toolChestT = timeseriesFactory.getToolchest();
    for (int i = 0; i < numSegments; i++) {
        SegmentId segmentId = SegmentId.dummy("qIndex " + i);
        QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(timeseriesFactory, segmentId, new QueryableIndexSegment(qIndexes.get(i), segmentId));
        singleSegmentRunnersT.add(new PerSegmentOptimizingQueryRunner<>(
            toolChestT.preMergeQueryDecoration(runner),
            new PerSegmentQueryOptimizationContext(new SegmentDescriptor(segmentIntervals[i], "1", 0))));
    }
    timeseriesRunner = toolChestT.postMergeQueryDecoration(
        new FinalizeResultsQueryRunner<>(
            toolChestT.mergeResults(timeseriesFactory.mergeRunners(executorService, singleSegmentRunnersT)),
            toolChestT));
}
Also used : QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) IndexSpec(org.apache.druid.segment.IndexSpec) ArrayList(java.util.ArrayList) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) TopNQueryQueryToolChest(org.apache.druid.query.topn.TopNQueryQueryToolChest) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) QueryToolChest(org.apache.druid.query.QueryToolChest) Result(org.apache.druid.query.Result) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) TopNResultValue(org.apache.druid.query.topn.TopNResultValue) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) SegmentId(org.apache.druid.timeline.SegmentId) QueryRunner(org.apache.druid.query.QueryRunner) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) PerSegmentOptimizingQueryRunner(org.apache.druid.query.PerSegmentOptimizingQueryRunner) PerSegmentQueryOptimizationContext(org.apache.druid.query.PerSegmentQueryOptimizationContext) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) QueryableIndex(org.apache.druid.segment.QueryableIndex) DataGenerator(org.apache.druid.segment.generator.DataGenerator) InputRow(org.apache.druid.data.input.InputRow) File(java.io.File) Setup(org.openjdk.jmh.annotations.Setup)
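
For context, this setup method only assembles the runners; a hedged sketch of a benchmark method that would exercise the `timeseriesRunner` built above (mirroring Example 26) might look like the following. The field name `timeseriesQuery` is an assumption, since the actual query is created in setupQueries(), which is not shown in this snippet.

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryTimeseries(Blackhole blackhole) {
    // run the decorated, merged timeseries runner over all segments and drain the results
    Sequence<Result<TimeseriesResultValue>> queryResult = timeseriesRunner.run(QueryPlus.wrap(timeseriesQuery), ResponseContext.createEmpty());
    blackhole.consume(queryResult.toList());
}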

Example 28 with TimeseriesResultValue

use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.

the class DistinctCountTimeseriesQueryTest method testTimeseriesWithDistinctCountAgg.

@Test
public void testTimeseriesWithDistinctCountAgg() throws Exception {
    TimeseriesQueryEngine engine = new TimeseriesQueryEngine();
    IncrementalIndex index = new OnheapIncrementalIndex.Builder()
        .setIndexSchema(
            new IncrementalIndexSchema.Builder()
                .withQueryGranularity(Granularities.SECOND)
                .withMetrics(new CountAggregatorFactory("cnt"))
                .build())
        .setMaxRowCount(1000)
        .build();
    String visitor_id = "visitor_id";
    String client_type = "client_type";
    DateTime time = DateTimes.of("2016-03-04T00:00:00.000Z");
    long timestamp = time.getMillis();
    index.add(new MapBasedInputRow(timestamp, Lists.newArrayList(visitor_id, client_type), ImmutableMap.of(visitor_id, "0", client_type, "iphone")));
    index.add(new MapBasedInputRow(timestamp, Lists.newArrayList(visitor_id, client_type), ImmutableMap.of(visitor_id, "1", client_type, "iphone")));
    index.add(new MapBasedInputRow(timestamp, Lists.newArrayList(visitor_id, client_type), ImmutableMap.of(visitor_id, "2", client_type, "android")));
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
        .granularity(QueryRunnerTestHelper.ALL_GRAN)
        .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
        .aggregators(Lists.newArrayList(QueryRunnerTestHelper.ROWS_COUNT, new DistinctCountAggregatorFactory("UV", visitor_id, null)))
        .build();
    final Iterable<Result<TimeseriesResultValue>> results = engine.process(query, new IncrementalIndexStorageAdapter(index)).toList();
    List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(new Result<>(time, new TimeseriesResultValue(ImmutableMap.of("UV", 3, "rows", 3L))));
    TestHelper.assertExpectedResults(expectedResults, results);
}
Also used : TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) DateTime(org.joda.time.DateTime) Result(org.apache.druid.query.Result) TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) IncrementalIndexStorageAdapter(org.apache.druid.segment.incremental.IncrementalIndexStorageAdapter) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
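
The same check can be written without TestHelper by inspecting the single result directly; this is only a sketch using methods already exercised elsewhere on this page (Iterables.getOnlyElement, getTimestamp, getDoubleMetric), and it assumes the numeric metric values coerce cleanly to double.

Result<TimeseriesResultValue> onlyResult = Iterables.getOnlyElement(results);
// the single bucket should carry the query time and both aggregated metrics
Assert.assertEquals(time, onlyResult.getTimestamp());
Assert.assertEquals(3.0, onlyResult.getValue().getDoubleMetric("UV"), 0.0);
Assert.assertEquals(3.0, onlyResult.getValue().getDoubleMetric("rows"), 0.0);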

Example 29 with TimeseriesResultValue

use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.

the class SketchAggregationWithSimpleDataTest method testSimpleDataIngestAndTimeseriesQuery.

@Test
public void testSimpleDataIngestAndTimeseriesQuery() throws Exception {
    AggregationTestHelper timeseriesQueryAggregationTestHelper = AggregationTestHelper.createTimeseriesQueryAggregationTestHelper(sm.getJacksonModules(), tempFolder);
    Sequence seq = timeseriesQueryAggregationTestHelper.runQueryOnSegments(
        ImmutableList.of(s1, s2),
        (Query) SketchAggregationTest.readQueryFromClasspath("timeseries_query.json", timeseriesQueryAggregationTestHelper.getObjectMapper(), vectorize));
    Result<TimeseriesResultValue> result = (Result<TimeseriesResultValue>) Iterables.getOnlyElement(seq.toList());
    Assert.assertEquals(DateTimes.of("2014-10-20T00:00:00.000Z"), result.getTimestamp());
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketch_count"), 0.01);
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketchEstimatePostAgg"), 0.01);
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketchUnionPostAggEstimate"), 0.01);
    Assert.assertEquals(50.0, result.getValue().getDoubleMetric("sketchIntersectionPostAggEstimate"), 0.01);
    Assert.assertEquals(0.0, result.getValue().getDoubleMetric("sketchAnotBPostAggEstimate"), 0.01);
    Assert.assertEquals(0.0, result.getValue().getDoubleMetric("non_existing_col_validation"), 0.01);
}
Also used : TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) AggregationTestHelper(org.apache.druid.query.aggregation.AggregationTestHelper) Sequence(org.apache.druid.java.util.common.guava.Sequence) Result(org.apache.druid.query.Result) GroupByQueryRunnerTest(org.apache.druid.query.groupby.GroupByQueryRunnerTest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
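
The repeated delta-based assertions above could be folded into a small helper; this is a refactoring sketch, not part of the original test, and it uses only the Result and TimeseriesResultValue methods already shown.

private static void assertSketchMetric(Result<TimeseriesResultValue> result, String metric, double expected) {
    // compare a named double metric from the timeseries result with a small tolerance
    Assert.assertEquals(expected, result.getValue().getDoubleMetric(metric), 0.01);
}

// usage inside the test: assertSketchMetric(result, "sketch_count", 50.0);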

Example 30 with TimeseriesResultValue

use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.

the class CPUTimeMetricQueryRunnerTest method testCpuTimeMetric.

@Test
public void testCpuTimeMetric() {
    final StubServiceEmitter emitter = new StubServiceEmitter("s", "h");
    final AtomicLong accumulator = new AtomicLong();
    final List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
        new Result<>(DateTimes.of("2000-01-01"), new TimeseriesResultValue(ImmutableMap.of("x", "y"))));
    final QueryRunner<Result<TimeseriesResultValue>> runner = CPUTimeMetricQueryRunner.safeBuild(
        (queryPlus, responseContext) -> Sequences.simple(expectedResults),
        new TimeseriesQueryQueryToolChest(),
        emitter,
        accumulator,
        true);
    final Sequence<Result<TimeseriesResultValue>> results = runner.run(
        QueryPlus.wrap(Druids.newTimeseriesQueryBuilder().dataSource("foo").intervals("2000/2001").build())
            .withQueryMetrics(new TimeseriesQueryQueryToolChest()));
    Assert.assertEquals(expectedResults, results.toList());
    Assert.assertEquals(1, emitter.getEvents().size());
    final Event event = Iterables.getOnlyElement(emitter.getEvents());
    Assert.assertEquals("metrics", event.toMap().get("feed"));
    Assert.assertEquals("query/cpu/time", event.toMap().get("metric"));
    final Object value = event.toMap().get("value");
    Assert.assertThat(value, CoreMatchers.instanceOf(Long.class));
    Assert.assertTrue((long) value > 0);
}
Also used : StubServiceEmitter(org.apache.druid.java.util.metrics.StubServiceEmitter) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) Event(org.apache.druid.java.util.emitter.core.Event) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) Test(org.junit.Test)
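
One hedged addition worth noting: the AtomicLong passed to safeBuild acts as a shared CPU-time accumulator, and the trailing `true` argument is what enables emitting the per-query "query/cpu/time" metric asserted above. Assuming the runner also adds the measured time to that accumulator, the test could check it directly once the sequence has been consumed:

// sketch only: after results.toList() has drained the sequence, the shared accumulator
// should hold a positive amount of measured CPU time
Assert.assertTrue(accumulator.get() > 0);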

Aggregations

TimeseriesResultValue (org.apache.druid.query.timeseries.TimeseriesResultValue) 50
Result (org.apache.druid.query.Result) 42
Test (org.junit.Test) 37
TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery) 35
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory) 20
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner) 18
QueryRunner (org.apache.druid.query.QueryRunner) 18
TimeseriesQueryQueryToolChest (org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) 18
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest) 18
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory) 17
TimeseriesQueryEngine (org.apache.druid.query.timeseries.TimeseriesQueryEngine) 15
Sequence (org.apache.druid.java.util.common.guava.Sequence) 12
TimeseriesQueryRunnerFactory (org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) 12
IOException (java.io.IOException) 9
SpatialDimFilter (org.apache.druid.query.filter.SpatialDimFilter) 9
ArrayList (java.util.ArrayList) 8
ResponseContext (org.apache.druid.query.context.ResponseContext) 7
QueryToolChest (org.apache.druid.query.QueryToolChest) 5
List (java.util.List) 4
RadiusBound (org.apache.druid.collections.spatial.search.RadiusBound) 4