Use of io.druid.query.timeseries.TimeseriesQueryRunnerFactory in project druid by druid-io.
From class IndexMergerV9WithSpatialIndexTest, method testSpatialQueryWithOtherSpatialDim:
@Test
public void testSpatialQueryWithOtherSpatialDim() {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("test")
      .granularity(Granularities.ALL)
      .intervals(Arrays.asList(new Interval("2013-01-01/2013-01-07")))
      .filters(new SpatialDimFilter("spatialIsRad", new RadiusBound(new float[]{0.0f, 0.0f}, 5)))
      .aggregators(Arrays.<AggregatorFactory>asList(
          new CountAggregatorFactory("rows"), new LongSumAggregatorFactory("val", "val")))
      .build();
  List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(new Result<>(
      new DateTime("2013-01-01T00:00:00.000Z"),
      new TimeseriesResultValue(
          ImmutableMap.<String, Object>builder().put("rows", 1L).put("val", 13L).build())));
  try {
    TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
        new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
        new TimeseriesQueryEngine(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
    TestHelper.assertExpectedResults(expectedResults, runner.run(query, Maps.newHashMap()));
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
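Every example on this page wires the factory the same way; here is a distilled sketch of that recurring pattern, where segment and query stand in for whatever the surrounding test provides:

// The factory combines a toolchest (result merging and finalization logic), an
// engine (per-segment execution), and a query watcher (a cancellation hook).
TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
    new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
    new TimeseriesQueryEngine(),
    QueryRunnerTestHelper.NOOP_QUERYWATCHER);
// FinalizeResultsQueryRunner finalizes aggregator values (e.g. turning sketch
// objects into numeric estimates) before they are compared against expectations.
QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
Sequence results = runner.run(query, Maps.newHashMap());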
Use of io.druid.query.timeseries.TimeseriesQueryRunnerFactory in project druid by druid-io.
From class AggregationTestHelper, method createTimeseriesQueryAggregationTestHelper:
public static final AggregationTestHelper createTimeseriesQueryAggregationTestHelper(
    List<? extends Module> jsonModulesToRegister,
    TemporaryFolder tempFolder) {
  ObjectMapper mapper = new DefaultObjectMapper();
  TimeseriesQueryQueryToolChest toolchest =
      new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator());
  TimeseriesQueryRunnerFactory factory =
      new TimeseriesQueryRunnerFactory(toolchest, new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
  IndexIO indexIO = new IndexIO(mapper, new ColumnConfig() {
    @Override
    public int columnCacheSizeBytes() {
      return 0;
    }
  });
  return new AggregationTestHelper(
      mapper, new IndexMerger(mapper, indexIO), indexIO, toolchest, factory, tempFolder, jsonModulesToRegister);
}
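A hypothetical call site for this helper (the empty module list, field names, and JUnit rule are illustrative assumptions, not taken from the Druid tests):

// Hypothetical usage sketch inside a test class.
@Rule
public final TemporaryFolder tempFolder = new TemporaryFolder();

private final AggregationTestHelper helper =
    AggregationTestHelper.createTimeseriesQueryAggregationTestHelper(
        Collections.<Module>emptyList(),  // register Jackson modules here if custom aggregators need them
        tempFolder);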
Use of io.druid.query.timeseries.TimeseriesQueryRunnerFactory in project druid by druid-io.
From class SchemaEvolutionTest, method testNumericEvolutionFiltering:
@Test
public void testNumericEvolutionFiltering() {
  final TimeseriesQueryRunnerFactory factory = QueryRunnerTestHelper.newTimeseriesQueryRunnerFactory();
  // "c1" changes from string(1) -> long(2) -> float(3) -> nonexistent(4).
  // Test the behavior of filtering across those schema changes.
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals("1000/3000")
      .filters(new BoundDimFilter("c1", "9", "11", false, false, null, null, StringComparators.NUMERIC))
      .aggregators(ImmutableList.of(
          new LongSumAggregatorFactory("a", "c1"),
          new DoubleSumAggregatorFactory("b", "c1"),
          new CountAggregatorFactory("c")))
      .build();
  // Only string(1) -- which we can filter but not aggregate.
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 0L, "b", 0.0, "c", 2L)),
      runQuery(query, factory, ImmutableList.of(index1)));
  // Only long(2) -- which we can filter and aggregate.
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.0, "c", 2L)),
      runQuery(query, factory, ImmutableList.of(index2)));
  // Only float(3) -- which we can't filter, but can aggregate.
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.100000381469727, "c", 2L)),
      runQuery(query, factory, ImmutableList.of(index3)));
  // Only nonexistent(4).
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 0L, "b", 0.0, "c", 0L)),
      runQuery(query, factory, ImmutableList.of(index4)));
  // string(1) + long(2) + float(3) + nonexistent(4).
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 38L, "b", 38.10000038146973, "c", 6L)),
      runQuery(query, factory, ImmutableList.of(index1, index2, index3, index4)));
}
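For readers unfamiliar with the eight-argument BoundDimFilter constructor used above, a commented restatement follows; the parameter roles are inferred from the call site and the filter API of this Druid era, so treat them as an informed reading rather than documentation:

DimFilter bound = new BoundDimFilter(
    "c1",                       // dimension to filter on
    "9",                        // lower bound
    "11",                       // upper bound
    false,                      // lowerStrict: false -> lower bound is inclusive
    false,                      // upperStrict: false -> upper bound is inclusive
    null,                       // legacy alphaNumeric flag, superseded by the ordering argument
    null,                       // no extraction function applied to the dimension
    StringComparators.NUMERIC); // compare values numerically, so "9" < "10" < "11"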
Use of io.druid.query.timeseries.TimeseriesQueryRunnerFactory in project druid by druid-io.
From class SchemaEvolutionTest, method testHyperUniqueEvolutionTimeseries:
@Test
public void testHyperUniqueEvolutionTimeseries() {
  final TimeseriesQueryRunnerFactory factory = QueryRunnerTestHelper.newTimeseriesQueryRunnerFactory();
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals("1000/3000")
      .aggregators(ImmutableList.<AggregatorFactory>of(new HyperUniquesAggregatorFactory("uniques", "uniques")))
      .build();
  // index1 has no "uniques" column.
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("uniques", 0)),
      runQuery(query, factory, ImmutableList.of(index1)));
  // index1 (no uniques) + index2 and index3 (yes uniques); we should be able to combine.
  Assert.assertEquals(timeseriesResult(ImmutableMap.of("uniques", 4.003911343725148d)),
      runQuery(query, factory, ImmutableList.of(index1, index2, index3)));
}
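Both assertions turn on how the hyperUnique aggregator degrades: a segment without the "uniques" column contributes an empty sketch (hence 0), and the combined estimate is 4.003911343725148 rather than exactly 4 because HyperLogLog produces approximate cardinalities. A commented restatement of the aggregator's two arguments, as read from the call site:

// name: the key under which the estimate appears in query results.
// fieldName: the pre-aggregated hyperUnique column to read from each segment.
AggregatorFactory uniques = new HyperUniquesAggregatorFactory("uniques", "uniques");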
Use of io.druid.query.timeseries.TimeseriesQueryRunnerFactory in project druid by druid-io.
From class FilteredAggregatorBenchmark, method setup:
@Setup
public void setup() throws IOException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }
  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
  BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
      schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
  incIndex = makeIncIndex(schemaInfo.getAggsArray());
  filter = new OrDimFilter(Arrays.asList(
      new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
      new JavaScriptDimFilter("dimSequential", "function(x) { return false }", null, JavaScriptConfig.getEnabledInstance()),
      new RegexDimFilter("dimSequential", "X", null),
      new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
      new InDimFilter("dimSequential", Arrays.asList("X"), null)));
  filteredMetrics = new AggregatorFactory[1];
  filteredMetrics[0] = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
  incIndexFilteredAgg = makeIncIndex(filteredMetrics);
  inputRows = new ArrayList<>();
  for (int j = 0; j < rowsPerSegment; j++) {
    InputRow row = gen.nextRow();
    if (j % 10000 == 0) {
      log.info(j + " rows generated.");
    }
    incIndex.add(row);
    inputRows.add(row);
  }
  tmpDir = Files.createTempDir();
  log.info("Using temp dir: " + tmpDir.getAbsolutePath());
  indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
  qIndex = INDEX_IO.loadIndex(indexFile);
  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER);
  BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
  List<AggregatorFactory> queryAggs = new ArrayList<>();
  queryAggs.add(filteredMetrics[0]);
  query = Druids.newTimeseriesQueryBuilder()
      .dataSource("blah")
      .granularity(Granularities.ALL)
      .intervals(intervalSpec)
      .aggregators(queryAggs)
      .descending(false)
      .build();
}
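The setup leaves factory, query, and qIndex ready for the timed methods. A hypothetical benchmark body built from those fields (the method name, JMH annotations, and the "qIndex" segment identifier are illustrative assumptions, not from the source):

@Benchmark
public void queryFilteredAggQueryableIndex(Blackhole blackhole) {
  // Wrap the persisted index as a segment and reuse the runner pattern from the tests above.
  QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<>(
      factory.createRunner(new QueryableIndexSegment("qIndex", qIndex)),
      factory.getToolchest());
  // Materialize the lazy result sequence so JMH measures full query execution.
  List<Result<TimeseriesResultValue>> results = Sequences.toList(
      runner.run(query, Maps.newHashMap()),
      Lists.<Result<TimeseriesResultValue>>newArrayList());
  blackhole.consume(results);
}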