Use of io.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class QueryMaker, method executeTimeseries.
private Sequence<Object[]> executeTimeseries(
    final DruidQueryBuilder queryBuilder,
    final TimeseriesQuery query
)
{
  final List<RelDataTypeField> fieldList = queryBuilder.getRowType().getFieldList();
  final List<DimensionSpec> dimensions = queryBuilder.getGrouping().getDimensions();

  // A timeseries query groups on time alone, so the lone dimension (if any) names the
  // output column that should receive each result's timestamp.
  final String timeOutputName = dimensions.isEmpty()
                                ? null
                                : Iterables.getOnlyElement(dimensions).getOutputName();

  Hook.QUERY_PLAN.run(query);

  return Sequences.map(
      query.run(walker, Maps.<String, Object>newHashMap()),
      new Function<Result<TimeseriesResultValue>, Object[]>()
      {
        @Override
        public Object[] apply(final Result<TimeseriesResultValue> result)
        {
          final Map<String, Object> row = result.getValue().getBaseObject();
          final Object[] retVal = new Object[fieldList.size()];

          // Coerce each aggregated value (or the timestamp) to the SQL type Calcite
          // expects for the corresponding output field.
          for (final RelDataTypeField field : fieldList) {
            final String outputName = queryBuilder.getRowOrder().get(field.getIndex());
            if (outputName.equals(timeOutputName)) {
              retVal[field.getIndex()] = coerce(result.getTimestamp(), field.getType().getSqlTypeName());
            } else {
              retVal[field.getIndex()] = coerce(row.get(outputName), field.getType().getSqlTypeName());
            }
          }
          return retVal;
        }
      }
  );
}
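QueryMaker's coerce helper is referenced above but not included in this snippet. For orientation only, here is a minimal sketch of what such a coercion step against Calcite's SqlTypeName could look like; this is a hypothetical stand-in, not QueryMaker's actual implementation:

// Hypothetical sketch, not the real QueryMaker.coerce.
private static Object coerce(final Object value, final SqlTypeName sqlType)
{
  if (value == null) {
    return null;
  }
  switch (sqlType) {
    case TIMESTAMP:
      // Assumption: timestamps arrive as joda DateTime instances, as in apply() above.
      return value instanceof DateTime ? ((DateTime) value).getMillis() : value;
    case BIGINT:
      return ((Number) value).longValue();
    case FLOAT:
    case DOUBLE:
      return ((Number) value).doubleValue();
    case VARCHAR:
      return String.valueOf(value);
    default:
      return value;
  }
}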
Use of io.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class SpatialFilterTest, method testSpatialQueryWithOtherSpatialDim.
@Test
public void testSpatialQueryWithOtherSpatialDim()
{
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                .dataSource("test")
                                .granularity(Granularities.ALL)
                                .intervals(Arrays.asList(new Interval("2013-01-01/2013-01-07")))
                                .filters(new SpatialDimFilter("spatialIsRad", new RadiusBound(new float[]{ 0.0f, 0.0f }, 5)))
                                .aggregators(
                                    Arrays.<AggregatorFactory>asList(
                                        new CountAggregatorFactory("rows"),
                                        new LongSumAggregatorFactory("val", "val")
                                    )
                                )
                                .build();
  List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
      new Result<TimeseriesResultValue>(
          new DateTime("2013-01-01T00:00:00.000Z"),
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>builder()
                          .put("rows", 1L)
                          .put("val", 13L)
                          .build()
          )
      )
  );
  try {
    TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
        new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
        new TimeseriesQueryEngine(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER
    );
    QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
    TestHelper.assertExpectedResults(expectedResults, runner.run(query, Maps.newHashMap()));
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
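The filter above keeps rows whose "spatialIsRad" coordinates lie within distance 5 of the origin. A quick illustration of that containment rule, assuming RadiusBound exposes the contains(float[]) check from Druid's spatial Bound interface (a sketch for intuition, not code from the test):

// Sketch: same bound as in the query above.
RadiusBound bound = new RadiusBound(new float[]{ 0.0f, 0.0f }, 5);
// (1, 2) lies at distance ~2.24 from the origin, inside the radius of 5.
boolean inside = bound.contains(new float[]{ 1.0f, 2.0f });
// (6, 0) lies at distance 6 from the origin, outside the radius.
boolean outside = bound.contains(new float[]{ 6.0f, 0.0f });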
Use of io.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class CachingClusteredClientTest, method testTimeSeriesWithFilter.
@Test
public void testTimeSeriesWithFilter() throws Exception
{
  DimFilter filter = Druids.newAndDimFilterBuilder()
                           .fields(
                               Arrays.asList(
                                   Druids.newOrDimFilterBuilder()
                                         .fields(
                                             Arrays.asList(
                                                 new SelectorDimFilter("dim0", "1", null),
                                                 new BoundDimFilter("dim0", "222", "333", false, false, false, null, StringComparators.LEXICOGRAPHIC)
                                             )
                                         )
                                         .build(),
                                   Druids.newAndDimFilterBuilder()
                                         .fields(
                                             Arrays.asList(
                                                 new InDimFilter("dim1", Arrays.asList("0", "1", "2", "3", "4"), null),
                                                 new BoundDimFilter("dim1", "0", "3", false, true, false, null, StringComparators.LEXICOGRAPHIC),
                                                 new BoundDimFilter("dim1", "1", "9999", true, false, false, null, StringComparators.LEXICOGRAPHIC)
                                             )
                                         )
                                         .build()
                               )
                           )
                           .build();
  final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
                                                      .dataSource(DATA_SOURCE)
                                                      .intervals(SEG_SPEC)
                                                      .filters(filter)
                                                      .granularity(GRANULARITY)
                                                      .aggregators(AGGS)
                                                      .postAggregators(POST_AGGS)
                                                      .context(CONTEXT);
  QueryRunner runner = new FinalizeResultsQueryRunner(
      client,
      new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
  );
  /*
    For dim0 (2011-01-01/2011-01-05), the combined range is {[1,1], [222,333]}, so segments [-inf,1], [1,2],
    [2,3], and [3,4] are needed.
    For dim1 (2011-01-06/2011-01-10), the combined range for the bound filters is {(1,3)}; combining this with
    the in filter results in {[2,2]}, so segments [1,2] and [2,3] are needed.
  */
  List<Iterable<Result<TimeseriesResultValue>>> expectedResult = Arrays.asList(
      makeTimeResults(
          new DateTime("2011-01-01"), 50, 5000,
          new DateTime("2011-01-02"), 10, 1252,
          new DateTime("2011-01-03"), 20, 6213,
          new DateTime("2011-01-04"), 30, 743
      ),
      makeTimeResults(
          new DateTime("2011-01-07"), 60, 6020,
          new DateTime("2011-01-08"), 70, 250
      )
  );
  testQueryCachingWithFilter(
      runner,
      3,
      builder.build(),
      expectedResult,
      new Interval("2011-01-01/2011-01-05"), makeTimeResults(new DateTime("2011-01-01"), 50, 5000),
      new Interval("2011-01-01/2011-01-05"), makeTimeResults(new DateTime("2011-01-02"), 10, 1252),
      new Interval("2011-01-01/2011-01-05"), makeTimeResults(new DateTime("2011-01-03"), 20, 6213),
      new Interval("2011-01-01/2011-01-05"), makeTimeResults(new DateTime("2011-01-04"), 30, 743),
      new Interval("2011-01-01/2011-01-05"), makeTimeResults(new DateTime("2011-01-05"), 40, 6000),
      new Interval("2011-01-06/2011-01-10"), makeTimeResults(new DateTime("2011-01-06"), 50, 425),
      new Interval("2011-01-06/2011-01-10"), makeTimeResults(new DateTime("2011-01-07"), 60, 6020),
      new Interval("2011-01-06/2011-01-10"), makeTimeResults(new DateTime("2011-01-08"), 70, 250),
      new Interval("2011-01-06/2011-01-10"), makeTimeResults(new DateTime("2011-01-09"), 23, 85312),
      new Interval("2011-01-06/2011-01-10"), makeTimeResults(new DateTime("2011-01-10"), 100, 512)
  );
}
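makeTimeResults is a helper defined elsewhere in CachingClusteredClientTest. Judging only from the call sites above, it appears to turn each (timestamp, rows, idx) triple into one timeseries row; a plausible sketch under that assumption, with the metric names "rows" and "idx" guessed rather than taken from the test:

// Hypothetical sketch of the helper; the real makeTimeResults is not shown in this snippet.
private Iterable<Result<TimeseriesResultValue>> makeTimeResults(Object... objects)
{
  if (objects.length % 3 != 0) {
    throw new ISE("makeTimeResults must be passed arguments in groups of 3");
  }
  final List<Result<TimeseriesResultValue>> retVal = Lists.newArrayListWithCapacity(objects.length / 3);
  for (int i = 0; i < objects.length; i += 3) {
    retVal.add(
        new Result<TimeseriesResultValue>(
            (DateTime) objects[i],
            new TimeseriesResultValue(
                ImmutableMap.<String, Object>of("rows", objects[i + 1], "idx", objects[i + 2])
            )
        )
    );
  }
  return retVal;
}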
Use of io.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class CachingClusteredClientTest, method testQueryCachingWithFilter.
@SuppressWarnings("unchecked")
public void testQueryCachingWithFilter(
    final QueryRunner runner,
    final int numTimesToQuery,
    final Query query,
    final List<Iterable<Result<TimeseriesResultValue>>> filteredExpected,
    // does this assume query intervals must be ordered?
    Object... args
)
{
  final List<Interval> queryIntervals = Lists.newArrayListWithCapacity(args.length / 2);
  final List<List<Iterable<Result<Object>>>> expectedResults = Lists.newArrayListWithCapacity(queryIntervals.size());
  parseResults(queryIntervals, expectedResults, args);

  for (int i = 0; i < queryIntervals.size(); ++i) {
    List<Object> mocks = Lists.newArrayList();
    mocks.add(serverView);

    final Interval actualQueryInterval = new Interval(queryIntervals.get(0).getStart(), queryIntervals.get(i).getEnd());
    final List<Map<DruidServer, ServerExpectations>> serverExpectationList = populateTimeline(queryIntervals, expectedResults, i, mocks);
    final Map<DruidServer, ServerExpectations> finalExpectation = serverExpectationList.get(serverExpectationList.size() - 1);

    for (Map.Entry<DruidServer, ServerExpectations> entry : finalExpectation.entrySet()) {
      DruidServer server = entry.getKey();
      ServerExpectations expectations = entry.getValue();
      EasyMock.expect(serverView.getQueryRunner(server)).andReturn(expectations.getQueryRunner()).times(0, 1);

      final Capture<? extends Query> capture = new Capture();
      final Capture<? extends Map> context = new Capture();
      QueryRunner queryable = expectations.getQueryRunner();

      if (query instanceof TimeseriesQuery) {
        final List<String> segmentIds = Lists.newArrayList();
        final List<Iterable<Result<TimeseriesResultValue>>> results = Lists.newArrayList();
        for (ServerExpectation expectation : expectations) {
          segmentIds.add(expectation.getSegmentId());
          results.add(expectation.getResults());
        }
        EasyMock.expect(queryable.run(EasyMock.capture(capture), EasyMock.capture(context)))
                .andAnswer(
                    new IAnswer<Sequence>()
                    {
                      @Override
                      public Sequence answer() throws Throwable
                      {
                        return toFilteredQueryableTimeseriesResults(
                            (TimeseriesQuery) capture.getValue(),
                            segmentIds,
                            queryIntervals,
                            results
                        );
                      }
                    }
                )
                .times(0, 1);
      } else {
        throw new ISE("Unknown query type[%s]", query.getClass());
      }
    }

    final Iterable<Result<Object>> expected = new ArrayList<>();
    for (int intervalNo = 0; intervalNo < i + 1; intervalNo++) {
      Iterables.addAll((List) expected, filteredExpected.get(intervalNo));
    }

    runWithMocks(
        new Runnable()
        {
          @Override
          public void run()
          {
            HashMap<String, List> context = new HashMap<String, List>();
            for (int i = 0; i < numTimesToQuery; ++i) {
              TestHelper.assertExpectedResults(
                  expected,
                  runner.run(
                      query.withQuerySegmentSpec(new MultipleIntervalSegmentSpec(ImmutableList.of(actualQueryInterval))),
                      context
                  )
              );
              if (queryCompletedCallback != null) {
                queryCompletedCallback.run();
              }
            }
          }
        },
        mocks.toArray()
    );
  }
}
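parseResults is likewise defined outside this snippet. Given that args alternates Interval / expected-result pairs, and that testTimeSeriesWithFilter above passes several consecutive pairs sharing one interval, one plausible reading is that it groups results by repeated interval (hypothetical sketch, not the test's actual helper):

// Hypothetical sketch; the real parseResults is not shown in this snippet.
@SuppressWarnings("unchecked")
private void parseResults(
    final List<Interval> queryIntervals,
    final List<List<Iterable<Result<Object>>>> expectedResults,
    Object... args
)
{
  for (int i = 0; i < args.length; i += 2) {
    final Interval interval = (Interval) args[i];
    final Iterable<Result<Object>> results = (Iterable<Result<Object>>) args[i + 1];
    if (!queryIntervals.isEmpty() && interval.equals(queryIntervals.get(queryIntervals.size() - 1))) {
      // Same interval as the previous pair: group the results together.
      expectedResults.get(expectedResults.size() - 1).add(results);
    } else {
      queryIntervals.add(interval);
      expectedResults.add(Lists.<Iterable<Result<Object>>>newArrayList(results));
    }
  }
}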
Use of io.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class IncrementalIndexTest, method testSingleThreadedIndexingAndQuery.
@Test
public void testSingleThreadedIndexingAndQuery() throws Exception
{
  final int dimensionCount = 5;
  final ArrayList<AggregatorFactory> ingestAggregatorFactories = new ArrayList<>();
  ingestAggregatorFactories.add(new CountAggregatorFactory("rows"));
  for (int i = 0; i < dimensionCount; ++i) {
    ingestAggregatorFactories.add(new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("Dim_%s", i)));
    ingestAggregatorFactories.add(new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("Dim_%s", i)));
  }

  final IncrementalIndex index = closer.closeLater(
      indexCreator.createIndex(ingestAggregatorFactories.toArray(new AggregatorFactory[ingestAggregatorFactories.size()]))
  );
  final long timestamp = System.currentTimeMillis();
  final int rows = 50;

  // ingesting same data twice to have some merging happening
  for (int i = 0; i < rows; i++) {
    index.add(getLongRow(timestamp + i, i, dimensionCount));
  }
  for (int i = 0; i < rows; i++) {
    index.add(getLongRow(timestamp + i, i, dimensionCount));
  }

  // run a timeseries query on the index and verify results
  final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>();
  queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
  for (int i = 0; i < dimensionCount; ++i) {
    queryAggregatorFactories.add(new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("sumResult%s", i)));
    queryAggregatorFactories.add(new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("doubleSumResult%s", i)));
  }

  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                .dataSource("xxx")
                                .granularity(Granularities.ALL)
                                .intervals(ImmutableList.of(new Interval("2000/2030")))
                                .aggregators(queryAggregatorFactories)
                                .build();

  final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
  final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
      new TimeseriesQueryEngine(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER
  );
  final QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(
      factory.createRunner(incrementalIndexSegment),
      factory.getToolchest()
  );

  List<Result<TimeseriesResultValue>> results = Sequences.toList(
      runner.run(query, new HashMap<String, Object>()),
      new LinkedList<Result<TimeseriesResultValue>>()
  );
  Result<TimeseriesResultValue> result = Iterables.getOnlyElement(results);

  boolean isRollup = index.isRollup();
  Assert.assertEquals(rows * (isRollup ? 1 : 2), result.getValue().getLongMetric("rows").intValue());
  for (int i = 0; i < dimensionCount; ++i) {
    Assert.assertEquals(
        String.format("Failed long sum on dimension %d", i),
        2 * rows,
        result.getValue().getLongMetric(String.format("sumResult%s", i)).intValue()
    );
    Assert.assertEquals(
        String.format("Failed double sum on dimension %d", i),
        2 * rows,
        result.getValue().getDoubleMetric(String.format("doubleSumResult%s", i)).intValue()
    );
  }
}
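getLongRow is not shown in this snippet, but the assertions constrain it: summing each Dim_i column over the 100 ingested rows must yield 2 * rows = 100, which works out if every row carries the long value 1 in each of the dimensionCount columns. A sketch consistent with that (hypothetical, not necessarily the test's actual helper):

// Hypothetical sketch of the row-building helper used above.
private static MapBasedInputRow getLongRow(long timestamp, int rowID, int dimensionCount)
{
  final List<String> dimensionList = new ArrayList<String>(dimensionCount);
  final ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
  for (int i = 0; i < dimensionCount; i++) {
    final String dimName = String.format("Dim_%d", i);
    dimensionList.add(dimName);
    // Every row contributes 1 to each long/double sum, matching the 2 * rows assertions.
    builder.put(dimName, 1L);
  }
  return new MapBasedInputRow(timestamp, dimensionList, builder.build());
}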