Use of io.druid.query.aggregation.AggregatorFactory in project druid by druid-io:
the class IndexMergerV9WithSpatialIndexTest, method testSpatialQueryWithOtherSpatialDim.
@Test
public void testSpatialQueryWithOtherSpatialDim() {
    // Query the secondary spatial dimension ("spatialIsRad") with a radius bound
    // centered at the origin; exactly one matching row (val = 13) is expected.
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource("test")
        .granularity(Granularities.ALL)
        .intervals(Arrays.asList(new Interval("2013-01-01/2013-01-07")))
        .filters(new SpatialDimFilter("spatialIsRad", new RadiusBound(new float[] { 0.0f, 0.0f }, 5)))
        .aggregators(
            Arrays.<AggregatorFactory>asList(
                new CountAggregatorFactory("rows"),
                new LongSumAggregatorFactory("val", "val")
            )
        )
        .build();
    List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
        new Result<>(
            new DateTime("2013-01-01T00:00:00.000Z"),
            new TimeseriesResultValue(
                ImmutableMap.<String, Object>builder().put("rows", 1L).put("val", 13L).build()
            )
        )
    );
    try {
        TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
            new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
            new TimeseriesQueryEngine(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER
        );
        QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
        TestHelper.assertExpectedResults(expectedResults, runner.run(query, Maps.newHashMap()));
    } catch (Exception e) {
        // Guava's Throwables.propagate is deprecated; wrap and rethrow explicitly
        // so the test still fails with the original exception as the cause.
        throw new RuntimeException(e);
    }
}
Use of io.druid.query.aggregation.AggregatorFactory in project druid by druid-io:
the class MetadataTest, method testSerde.
@Test
public void testSerde() throws Exception {
    // Round-trip a fully populated Metadata instance through JSON and verify
    // that deserialization reproduces an equal object.
    final ObjectMapper mapper = new DefaultObjectMapper();

    final Metadata original = new Metadata();
    original.put("k", "v");
    original.setAggregators(new AggregatorFactory[] { new LongSumAggregatorFactory("out", "in") });
    original.setQueryGranularity(Granularities.ALL);
    original.setRollup(Boolean.FALSE);

    final String json = mapper.writeValueAsString(original);
    final Metadata roundTripped = mapper.readValue(json, Metadata.class);
    Assert.assertEquals(original, roundTripped);
}
Use of io.druid.query.aggregation.AggregatorFactory in project druid by druid-io:
the class MetadataTest, method testMerge.
@Test
public void testMerge() {
    // Merging nothing (or only nulls) yields no metadata at all.
    Assert.assertNull(Metadata.merge(null, null));
    Assert.assertNull(Metadata.merge(ImmutableList.<Metadata>of(), null));

    final List<Metadata> toMerge = new ArrayList<>();
    toMerge.add(null);
    Assert.assertNull(Metadata.merge(toMerge, null));

    // Sanity merge check: two identically populated inputs.
    final AggregatorFactory[] aggs = new AggregatorFactory[] { new LongMaxAggregatorFactory("n", "f") };
    final Metadata first = makeTestMetadata(aggs);
    final Metadata second = makeTestMetadata(aggs);

    final Metadata expected = new Metadata();
    expected.put("k", "v");
    // NOTE: the merged aggregator is expected as ("n", "n"), not the input's ("n", "f").
    expected.setAggregators(new AggregatorFactory[] { new LongMaxAggregatorFactory("n", "n") });
    expected.setTimestampSpec(new TimestampSpec("ds", "auto", null));
    expected.setRollup(Boolean.FALSE);
    expected.setQueryGranularity(Granularities.ALL);
    Assert.assertEquals(expected, Metadata.merge(ImmutableList.of(first, second), null));

    // A null entry in the list is expected to null out every mergeable field.
    toMerge.clear();
    toMerge.add(first);
    toMerge.add(second);
    toMerge.add(null);
    expected.setAggregators(null);
    expected.setTimestampSpec(null);
    expected.setQueryGranularity(null);
    expected.setRollup(null);
    Assert.assertEquals(expected, Metadata.merge(toMerge, null));

    // Explicitly supplied aggregators take precedence over the merged ones.
    final AggregatorFactory[] explicitAggs = new AggregatorFactory[] { new DoubleMaxAggregatorFactory("x", "y") };
    expected.setAggregators(explicitAggs);
    Assert.assertEquals(expected, Metadata.merge(toMerge, explicitAggs));

    // With conflicting rollup flags the merged rollup stays null, while the
    // other fields still combine normally.
    expected.setTimestampSpec(new TimestampSpec("ds", "auto", null));
    expected.setQueryGranularity(Granularities.ALL);
    first.setRollup(Boolean.TRUE);
    Assert.assertEquals(expected, Metadata.merge(ImmutableList.of(first, second), explicitAggs));
}

/** Builds a Metadata instance populated the way this test expects, using the given aggregators. */
private Metadata makeTestMetadata(AggregatorFactory[] aggs) {
    Metadata metadata = new Metadata();
    metadata.put("k", "v");
    metadata.setAggregators(aggs);
    metadata.setTimestampSpec(new TimestampSpec("ds", "auto", null));
    metadata.setQueryGranularity(Granularities.ALL);
    metadata.setRollup(Boolean.FALSE);
    return metadata;
}
Use of io.druid.query.aggregation.AggregatorFactory in project hive by apache:
the class DruidGroupByQueryRecordReader, method initExtractors.
/**
 * Initializes the per-column value extractors: one per aggregator spec,
 * followed by one per post-aggregator spec.
 *
 * @throws IOException if an aggregator has a type other than FLOAT or LONG
 */
private void initExtractors() throws IOException {
    extractors = new Extract[query.getAggregatorSpecs().size() + query.getPostAggregatorSpecs().size()];
    int counter = 0;
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++, counter++) {
        AggregatorFactory af = query.getAggregatorSpecs().get(i);
        // Upper-case with an explicit locale so the constant match is not broken
        // by locale-specific case rules (e.g. the Turkish dotless i).
        switch (af.getTypeName().toUpperCase(java.util.Locale.ENGLISH)) {
            case DruidSerDeUtils.FLOAT_TYPE:
                extractors[counter] = Extract.FLOAT;
                break;
            case DruidSerDeUtils.LONG_TYPE:
                extractors[counter] = Extract.LONG;
                break;
            default:
                // Name the offending type so the failure is diagnosable.
                throw new IOException("Type not supported: " + af.getTypeName());
        }
    }
    // Every post-aggregator value is read as a float.
    for (int i = 0; i < query.getPostAggregatorSpecs().size(); i++, counter++) {
        extractors[counter] = Extract.FLOAT;
    }
}
Use of io.druid.query.aggregation.AggregatorFactory in project hive by apache:
the class DruidSerDe, method inferSchema.
/* GroupBy query */
private void inferSchema(GroupByQuery query, List<String> columnNames, List<PrimitiveTypeInfo> columnTypes) {
    // The timestamp column always comes first.
    columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
    columnTypes.add(TypeInfoFactory.timestampTypeInfo);
    // Every dimension is exposed as a string column.
    for (DimensionSpec dimension : query.getDimensions()) {
        columnNames.add(dimension.getOutputName());
        columnTypes.add(TypeInfoFactory.stringTypeInfo);
    }
    // Aggregator columns keep their Druid type, translated to the Hive equivalent.
    for (AggregatorFactory aggregator : query.getAggregatorSpecs()) {
        columnNames.add(aggregator.getName());
        columnTypes.add(DruidSerDeUtils.convertDruidToHiveType(aggregator.getTypeName()));
    }
    // Post-aggregation columns are all exposed as floats.
    for (PostAggregator postAggregator : query.getPostAggregatorSpecs()) {
        columnNames.add(postAggregator.getName());
        columnTypes.add(TypeInfoFactory.floatTypeInfo);
    }
}
Aggregations