Use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class StreamAppenderatorTest, method testQueryBySegments:
@Test
public void testQueryBySegments() throws Exception {
try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(2, true)) {
final Appenderator appenderator = tester.getAppenderator();
appenderator.startJob();
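// Identifiers 0 and 1 receive the 2000 rows below; identifier 2 receives the 2001 rows.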
appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), Suppliers.ofInstance(Committers.nil()));
appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 2), Suppliers.ofInstance(Committers.nil()));
appenderator.add(IDENTIFIERS.get(1), ir("2000", "foo", 4), Suppliers.ofInstance(Committers.nil()));
appenderator.add(IDENTIFIERS.get(2), ir("2001", "foo", 8), Suppliers.ofInstance(Committers.nil()));
appenderator.add(IDENTIFIERS.get(2), ir("2001T01", "foo", 16), Suppliers.ofInstance(Committers.nil()));
appenderator.add(IDENTIFIERS.get(2), ir("2001T02", "foo", 32), Suppliers.ofInstance(Committers.nil()));
appenderator.add(IDENTIFIERS.get(2), ir("2001T03", "foo", 64), Suppliers.ofInstance(Committers.nil()));
// Query1: segment #2
final TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
    .dataSource(StreamAppenderatorTester.DATASOURCE)
    .aggregators(Arrays.asList(
        new LongSumAggregatorFactory("count", "count"),
        new LongSumAggregatorFactory("met", "met")))
    .granularity(Granularities.DAY)
    .intervals(new MultipleSpecificSegmentSpec(ImmutableList.of(
        new SegmentDescriptor(
            IDENTIFIERS.get(2).getInterval(),
            IDENTIFIERS.get(2).getVersion(),
            IDENTIFIERS.get(2).getShardSpec().getPartitionNum()))))
    .build();
final List<Result<TimeseriesResultValue>> results1 =
    QueryPlus.wrap(query1).run(appenderator, ResponseContext.createEmpty()).toList();
Assert.assertEquals(
    "query1",
    ImmutableList.of(new Result<>(
        DateTimes.of("2001"),
        new TimeseriesResultValue(ImmutableMap.of("count", 4L, "met", 120L)))),
    results1);
// Query2: segment #2, partial
final TimeseriesQuery query2 = Druids.newTimeseriesQueryBuilder()
    .dataSource(StreamAppenderatorTester.DATASOURCE)
    .aggregators(Arrays.asList(
        new LongSumAggregatorFactory("count", "count"),
        new LongSumAggregatorFactory("met", "met")))
    .granularity(Granularities.DAY)
    .intervals(new MultipleSpecificSegmentSpec(ImmutableList.of(
        new SegmentDescriptor(
            Intervals.of("2001/PT1H"),
            IDENTIFIERS.get(2).getVersion(),
            IDENTIFIERS.get(2).getShardSpec().getPartitionNum()))))
    .build();
final List<Result<TimeseriesResultValue>> results2 =
    QueryPlus.wrap(query2).run(appenderator, ResponseContext.createEmpty()).toList();
Assert.assertEquals(
    "query2",
    ImmutableList.of(new Result<>(
        DateTimes.of("2001"),
        new TimeseriesResultValue(ImmutableMap.of("count", 1L, "met", 8L)))),
    results2);
// Query3: segment #2, two disjoint intervals
final TimeseriesQuery query3 = Druids.newTimeseriesQueryBuilder()
    .dataSource(StreamAppenderatorTester.DATASOURCE)
    .aggregators(Arrays.asList(
        new LongSumAggregatorFactory("count", "count"),
        new LongSumAggregatorFactory("met", "met")))
    .granularity(Granularities.DAY)
    .intervals(new MultipleSpecificSegmentSpec(ImmutableList.of(
        new SegmentDescriptor(
            Intervals.of("2001/PT1H"),
            IDENTIFIERS.get(2).getVersion(),
            IDENTIFIERS.get(2).getShardSpec().getPartitionNum()),
        new SegmentDescriptor(
            Intervals.of("2001T03/PT1H"),
            IDENTIFIERS.get(2).getVersion(),
            IDENTIFIERS.get(2).getShardSpec().getPartitionNum()))))
    .build();
final List<Result<TimeseriesResultValue>> results3 =
    QueryPlus.wrap(query3).run(appenderator, ResponseContext.createEmpty()).toList();
Assert.assertEquals(
    "query3",
    ImmutableList.of(new Result<>(
        DateTimes.of("2001"),
        new TimeseriesResultValue(ImmutableMap.of("count", 2L, "met", 72L)))),
    results3);
// Query4: same two disjoint intervals, as a Scan query
final ScanQuery query4 = Druids.newScanQueryBuilder()
    .dataSource(StreamAppenderatorTester.DATASOURCE)
    .intervals(new MultipleSpecificSegmentSpec(ImmutableList.of(
        new SegmentDescriptor(
            Intervals.of("2001/PT1H"),
            IDENTIFIERS.get(2).getVersion(),
            IDENTIFIERS.get(2).getShardSpec().getPartitionNum()),
        new SegmentDescriptor(
            Intervals.of("2001T03/PT1H"),
            IDENTIFIERS.get(2).getVersion(),
            IDENTIFIERS.get(2).getShardSpec().getPartitionNum()))))
    .order(ScanQuery.Order.ASCENDING)
    .batchSize(10)
    .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
    .build();
final List<ScanResultValue> results4 =
    QueryPlus.wrap(query4).run(appenderator, ResponseContext.createEmpty()).toList();
// 2 segments, 1 row per segment
Assert.assertEquals(2, results4.size());
Assert.assertArrayEquals(
    new String[]{"__time", "dim", "count", "met"},
    results4.get(0).getColumns().toArray());
Assert.assertArrayEquals(
    new Object[]{DateTimes.of("2001").getMillis(), "foo", 1L, 8L},
    ((List<Object>) ((List<Object>) results4.get(0).getEvents()).get(0)).toArray());
Assert.assertArrayEquals(
    new String[]{"__time", "dim", "count", "met"},
    results4.get(1).getColumns().toArray());
Assert.assertArrayEquals(
    new Object[]{DateTimes.of("2001T03").getMillis(), "foo", 1L, 64L},
    ((List<Object>) ((List<Object>) results4.get(1).getEvents()).get(0)).toArray());
}
}
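The ir(...) helper and the IDENTIFIERS fixture used above are defined elsewhere in the test class. A minimal sketch of their likely shape, assuming Druid's MapBasedInputRow, SegmentIdWithShardSpec, and LinearShardSpec; the exact intervals, version string, and partition numbers are illustrative assumptions, not copied from the test:

// Hypothetical reconstruction of the fixtures referenced above.
private static final List<SegmentIdWithShardSpec> IDENTIFIERS = ImmutableList.of(
    si("2000/2001", "A", 0),  // IDENTIFIERS.get(0)
    si("2000/2001", "A", 1),  // IDENTIFIERS.get(1)
    si("2001/2002", "A", 0)   // IDENTIFIERS.get(2), "segment #2" in the queries
);

private static SegmentIdWithShardSpec si(String interval, String version, int partitionNum) {
  return new SegmentIdWithShardSpec(
      StreamAppenderatorTester.DATASOURCE,
      Intervals.of(interval),
      version,
      new LinearShardSpec(partitionNum)
  );
}

// Builds an input row with a single dimension "dim" and a metric input field "met".
private static InputRow ir(String ts, String dim, Object met) {
  return new MapBasedInputRow(
      DateTimes.of(ts).getMillis(),
      ImmutableList.of("dim"),
      ImmutableMap.of("dim", dim, "met", met)
  );
}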
Use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class ChainedExecutionQueryRunnerTest, method testSubmittedTaskType:
@Test
public void testSubmittedTaskType() {
QueryProcessingPool queryProcessingPool = Mockito.mock(QueryProcessingPool.class);
QueryWatcher watcher = EasyMock.createStrictMock(QueryWatcher.class);
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource("test")
    .intervals("2014/2015")
    .aggregators(Collections.singletonList(new CountAggregatorFactory("count")))
    .context(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 100, "queryId", "test"))
    .build();
List<QueryRunner<Result<TimeseriesResultValue>>> runners =
    Arrays.asList(Mockito.mock(QueryRunner.class), Mockito.mock(QueryRunner.class));
ChainedExecutionQueryRunner<Result<TimeseriesResultValue>> chainedRunner =
    new ChainedExecutionQueryRunner<>(queryProcessingPool, watcher, runners);
Mockito.when(queryProcessingPool.submitRunnerTask(ArgumentMatchers.any()))
       .thenReturn(Futures.immediateFuture(Collections.singletonList(123)));
chainedRunner.run(QueryPlus.wrap(query)).toList();
// The chained runner should submit exactly one PrioritizedQueryRunnerCallable
// per delegate runner, each wrapping the runner it will execute.
ArgumentCaptor<PrioritizedQueryRunnerCallable> captor =
    ArgumentCaptor.forClass(PrioritizedQueryRunnerCallable.class);
Mockito.verify(queryProcessingPool, Mockito.times(2)).submitRunnerTask(captor.capture());
List<QueryRunner> actual = captor.getAllValues()
                                 .stream()
                                 .map(PrioritizedQueryRunnerCallable::getRunner)
                                 .collect(Collectors.toList());
Assert.assertEquals(runners, actual);
}
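Because submitRunnerTask is stubbed with a canned future, the delegate runners are never actually executed; the test only checks what the pool receives. To exercise the full path instead, one option (a sketch, assuming this Druid version ships the ForwardingQueryProcessingPool wrapper and the Execs utility) is to back the pool with a real executor:

// Sketch: a real pool so chainedRunner.run(...) actually invokes each runner.
QueryProcessingPool realPool =
    new ForwardingQueryProcessingPool(Execs.multiThreaded(2, "chained-test-%d"));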
Use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class StringFirstTimeseriesQueryTest, method testTimeseriesQuery:
@Test
public void testTimeseriesQuery() {
TimeseriesQueryEngine engine = new TimeseriesQueryEngine();
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .granularity(QueryRunnerTestHelper.ALL_GRAN)
    .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
    .aggregators(ImmutableList.of(
        new StringFirstAggregatorFactory("nonfolding", CLIENT_TYPE, null, 1024),
        new StringFirstAggregatorFactory("folding", FIRST_CLIENT_TYPE, null, 1024),
        new StringFirstAggregatorFactory("nonexistent", "nonexistent", null, 1024),
        new StringFirstAggregatorFactory("numeric", "cnt", null, 1024)))
    .build();
List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
    new Result<>(
        TIME1,
        new TimeseriesResultValue(
            ImmutableMap.<String, Object>builder()
                .put("nonfolding", new SerializablePairLongString(TIME1.getMillis(), "iphone"))
                .put("folding", new SerializablePairLongString(TIME1.getMillis(), "iphone"))
                .put("nonexistent", new SerializablePairLongString(DateTimes.MAX.getMillis(), null))
                .put("numeric", new SerializablePairLongString(DateTimes.MAX.getMillis(), null))
                .build())));
final Iterable<Result<TimeseriesResultValue>> iiResults =
    engine.process(query, new IncrementalIndexStorageAdapter(incrementalIndex)).toList();
final Iterable<Result<TimeseriesResultValue>> qiResults =
    engine.process(query, new QueryableIndexStorageAdapter(queryableIndex)).toList();
TestHelper.assertExpectedResults(expectedResults, iiResults, "incremental index");
TestHelper.assertExpectedResults(expectedResults, qiResults, "queryable index");
}
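A note on the expected values: a stringFirst aggregator resolves to a (timestamp, string) pair, and a column that never yields a usable string value, here the missing "nonexistent" column and the numeric "cnt" column, resolves to a sentinel pair. A small sketch of that sentinel, built from the same classes the test uses:

// The "no value seen" result for stringFirst: the maximum timestamp paired
// with a null string, as asserted above for "nonexistent" and "numeric".
SerializablePairLongString noValue =
    new SerializablePairLongString(DateTimes.MAX.getMillis(), null);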
Use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class VectorizedVirtualColumnTest, method testTimeseries:
private void testTimeseries(ColumnCapabilities capabilities) {
testTimeseries(capabilities, CONTEXT_VECTORIZE_FORCE, true);
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .intervals("2000/2030")
    .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .granularity(Granularities.ALL)
    .virtualColumns(new AlwaysTwoVectorizedVirtualColumn(ALWAYS_TWO, capabilities))
    .aggregators(new AlwaysTwoCounterAggregatorFactory(COUNT, ALWAYS_TWO))
    .context(CONTEXT_VECTORIZE_FORCE)
    .build();
Sequence seq = timeseriesTestHelper.runQueryOnSegmentsObjs(segments, query);
List<Result<TimeseriesResultValue>> expectedResults = ImmutableList.of(
    new Result<>(
        DateTimes.of("2011-01-12T00:00:00.000Z"),
        new TimeseriesResultValue(ImmutableMap.of(COUNT, getCount(capabilities)))));
TestHelper.assertExpectedObjects(expectedResults, seq.toList(), "failed");
}
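CONTEXT_VECTORIZE_FORCE is defined elsewhere in the test class. A plausible sketch using Druid's standard query-context keys; the exact map is an assumption:

// Assumed shape of the forced-vectorization context: "force" makes the query
// fail instead of silently falling back to the non-vectorized engine.
private static final Map<String, Object> CONTEXT_VECTORIZE_FORCE = ImmutableMap.of(
    QueryContexts.VECTORIZE_KEY, "force",
    QueryContexts.VECTORIZE_VIRTUAL_COLUMNS_KEY, "force"
);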
Use of org.apache.druid.query.timeseries.TimeseriesResultValue in project druid by druid-io.
From the class VectorizedVirtualColumnTest, method testTimeseries (overload taking an explicit context and a canVectorize flag):
private void testTimeseries(ColumnCapabilities capabilities, Map<String, Object> context, boolean canVectorize) {
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .intervals("2000/2030")
    .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .granularity(Granularities.ALL)
    .virtualColumns(new AlwaysTwoVectorizedVirtualColumn(ALWAYS_TWO, capabilities, canVectorize))
    .aggregators(new AlwaysTwoCounterAggregatorFactory(COUNT, ALWAYS_TWO))
    .context(context)
    .build();
Sequence seq = timeseriesTestHelper.runQueryOnSegmentsObjs(segments, query);
List<Result<TimeseriesResultValue>> expectedResults = ImmutableList.of(
    new Result<>(
        DateTimes.of("2011-01-12T00:00:00.000Z"),
        new TimeseriesResultValue(ImmutableMap.of(COUNT, getCount(capabilities)))));
TestHelper.assertExpectedObjects(expectedResults, seq.toList(), "failed");
}
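Callers pair a context with whether the test virtual column is expected to vectorize under it. A hedged usage sketch; CONTEXT_VECTORIZE_DEFAULT is a hypothetical sibling constant, not shown in the excerpt:

// Sketch: exercising the same query under different vectorization settings.
testTimeseries(capabilities, CONTEXT_VECTORIZE_FORCE, true);    // must vectorize or fail
testTimeseries(capabilities, CONTEXT_VECTORIZE_DEFAULT, false); // non-vectorized path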