Use of io.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class QuantileSqlAggregatorTest, method setUp:
@Before
public void setUp() throws Exception {
Calcites.setSystemProperties();
// Note: this is needed in order to properly register the serde for Histogram.
new ApproximateHistogramDruidModule().configure(null);
final QueryableIndex index = IndexBuilder.create()
    .tmpDir(temporaryFolder.newFolder())
    .indexMerger(TestHelper.getTestIndexMergerV9())
    .schema(
        new IncrementalIndexSchema.Builder()
            .withMetrics(new AggregatorFactory[] {
                new CountAggregatorFactory("cnt"),
                new DoubleSumAggregatorFactory("m1", "m1"),
                new ApproximateHistogramAggregatorFactory("hist_m1", "m1", null, null, null, null)
            })
            .withRollup(false)
            .build()
    )
    .rows(CalciteTests.ROWS1)
    .buildMMappedIndex();
walker = new SpecificSegmentsQuerySegmentWalker(CalciteTests.queryRunnerFactoryConglomerate()).add(
    DataSegment.builder()
        .dataSource(DATA_SOURCE)
        .interval(index.getDataInterval())
        .version("1")
        .shardSpec(new LinearShardSpec(0))
        .build(),
    index
);
final PlannerConfig plannerConfig = new PlannerConfig();
final SchemaPlus rootSchema = Calcites.createRootSchema(CalciteTests.createMockSchema(walker, plannerConfig));
final DruidOperatorTable operatorTable = new DruidOperatorTable(
    ImmutableSet.<SqlAggregator>of(new QuantileSqlAggregator()),
    ImmutableSet.<SqlExtractionOperator>of()
);
plannerFactory = new PlannerFactory(rootSchema, walker, operatorTable, plannerConfig);
}
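For orientation, a minimal sketch (not part of the test) of the kind of SQL the planner built above can handle once QuantileSqlAggregator is in the operator table; the table name "druid.foo" is an assumption about how the mock schema exposes the walker's datasource:
// Illustrative only: uses the APPROX_QUANTILE function contributed by QuantileSqlAggregator.
// The table name "druid.foo" is an assumption, not shown in the snippet above.
final String sql = "SELECT APPROX_QUANTILE(m1, 0.9) FROM druid.foo";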
Use of io.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class SchemaEvolutionTest, method testNumericEvolutionFiltering:
@Test
public void testNumericEvolutionFiltering() {
final TimeseriesQueryRunnerFactory factory = QueryRunnerTestHelper.newTimeseriesQueryRunnerFactory();
// "c1" changes from string(1) -> long(2) -> float(3) -> nonexistent(4)
// test behavior of filtering
final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource(DATA_SOURCE)
    .intervals("1000/3000")
    .filters(new BoundDimFilter("c1", "9", "11", false, false, null, null, StringComparators.NUMERIC))
    .aggregators(ImmutableList.of(
        new LongSumAggregatorFactory("a", "c1"),
        new DoubleSumAggregatorFactory("b", "c1"),
        new CountAggregatorFactory("c")
    ))
    .build();
// Only string(1) -- which we can filter but not aggregate
Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 0L, "b", 0.0, "c", 2L)), runQuery(query, factory, ImmutableList.of(index1)));
// Only long(2) -- which we can filter and aggregate
Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.0, "c", 2L)), runQuery(query, factory, ImmutableList.of(index2)));
// Only float(3) -- which we can't filter, but can aggregate
Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.100000381469727, "c", 2L)), runQuery(query, factory, ImmutableList.of(index3)));
// Only nonexistent(4)
Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 0L, "b", 0.0, "c", 0L)), runQuery(query, factory, ImmutableList.of(index4)));
// string(1) + long(2) + float(3) + nonexistent(4)
Assert.assertEquals(timeseriesResult(ImmutableMap.of("a", 38L, "b", 38.10000038146973, "c", 6L)), runQuery(query, factory, ImmutableList.of(index1, index2, index3, index4)));
}
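As a quick sanity check (not part of the test), the expectations in the final assertion are simply the element-wise sums of the four per-segment expectations asserted above:
// Not from the original test: merged expectation = sum of the per-segment results.
long a = 0L + 19L + 19L + 0L;                      // 38
double b = 0.0 + 19.0 + 19.100000381469727 + 0.0;  // 38.10000038146973
long c = 2L + 2L + 2L + 0L;                        // 6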
Use of io.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class SchemaEvolutionTest, method setUp:
@Before
public void setUp() throws IOException {
// Index1: c1 is a string, c2 nonexistent, "uniques" nonexistent
index1 = IndexBuilder.create()
    .tmpDir(temporaryFolder.newFolder())
    .schema(
        new IncrementalIndexSchema.Builder()
            .withMetrics(new AggregatorFactory[] { new CountAggregatorFactory("cnt") })
            .withRollup(false)
            .build()
    )
    .rows(inputRowsWithDimensions(ImmutableList.of("c1")))
    .buildMMappedIndex();
// Index2: c1 is a long, c2 is a string, "uniques" is uniques on c2
index2 = IndexBuilder.create()
    .tmpDir(temporaryFolder.newFolder())
    .schema(
        new IncrementalIndexSchema.Builder()
            .withMetrics(new AggregatorFactory[] {
                new CountAggregatorFactory("cnt"),
                new LongSumAggregatorFactory("c1", "c1"),
                new HyperUniquesAggregatorFactory("uniques", "c2")
            })
            .withRollup(false)
            .build()
    )
    .rows(inputRowsWithDimensions(ImmutableList.of("c2")))
    .buildMMappedIndex();
// Index3: c1 is a float, c2 is a string, "uniques" is uniques on c2
index3 = IndexBuilder.create()
    .tmpDir(temporaryFolder.newFolder())
    .schema(
        new IncrementalIndexSchema.Builder()
            .withMetrics(new AggregatorFactory[] {
                new CountAggregatorFactory("cnt"),
                new DoubleSumAggregatorFactory("c1", "c1"),
                new HyperUniquesAggregatorFactory("uniques", "c2")
            })
            .withRollup(false)
            .build()
    )
    .rows(inputRowsWithDimensions(ImmutableList.of("c2")))
    .buildMMappedIndex();
// Index4: c1 is nonexistent, "c2" is uniques on c2
index4 = IndexBuilder.create()
    .tmpDir(temporaryFolder.newFolder())
    .schema(
        new IncrementalIndexSchema.Builder()
            .withMetrics(new AggregatorFactory[] { new HyperUniquesAggregatorFactory("c2", "c2") })
            .withRollup(false)
            .build()
    )
    .rows(inputRowsWithDimensions(ImmutableList.<String>of()))
    .buildMMappedIndex();
if (index4.getAvailableDimensions().size() != 0) {
// Just double-checking that the exclusions are working properly
throw new ISE("WTF?! Expected no dimensions in index4");
}
}
Use of io.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class DefaultOfflineAppenderatorFactoryTest, method testBuild:
@Test
public void testBuild() throws IOException, SegmentNotWritableException {
Injector injector = Initialization.makeInjectorWithModules(
    GuiceInjectors.makeStartupInjector(),
    ImmutableList.<Module>of(
        new Module() {
          @Override
          public void configure(Binder binder) {
            binder.bindConstant().annotatedWith(Names.named("serviceName")).to("druid/tool");
            binder.bindConstant().annotatedWith(Names.named("servicePort")).to(9999);
            binder.bind(DruidProcessingConfig.class).toInstance(
                new DruidProcessingConfig() {
                  @Override
                  public String getFormatString() {
                    return "processing-%s";
                  }

                  @Override
                  public int intermediateComputeSizeBytes() {
                    return 100 * 1024 * 1024;
                  }

                  @Override
                  public int getNumThreads() {
                    return 1;
                  }

                  @Override
                  public int columnCacheSizeBytes() {
                    return 25 * 1024 * 1024;
                  }
                }
            );
            binder.bind(ColumnConfig.class).to(DruidProcessingConfig.class);
          }
        }
    )
);
ObjectMapper objectMapper = injector.getInstance(ObjectMapper.class);
AppenderatorFactory defaultOfflineAppenderatorFactory = objectMapper.reader(AppenderatorFactory.class).readValue("{\"type\":\"offline\"}");
final Map<String, Object> parserMap = objectMapper.convertValue(
    new MapInputRowParser(
        new JSONParseSpec(
            new TimestampSpec("ts", "auto", null),
            new DimensionsSpec(null, null, null),
            null,
            null
        )
    ),
    Map.class
);
DataSchema schema = new DataSchema(
    "dataSourceName",
    parserMap,
    new AggregatorFactory[] {
        new CountAggregatorFactory("count"),
        new LongSumAggregatorFactory("met", "met")
    },
    new UniformGranularitySpec(Granularities.MINUTE, Granularities.NONE, null),
    objectMapper
);
RealtimeTuningConfig tuningConfig = new RealtimeTuningConfig(
    75000, null, null, temporaryFolder.newFolder(),
    null, null, null, null, null, null, 0, 0, null, null
);
try (Appenderator appenderator = defaultOfflineAppenderatorFactory.build(schema, tuningConfig, new FireDepartmentMetrics())) {
Assert.assertEquals("dataSourceName", appenderator.getDataSource());
Assert.assertEquals(null, appenderator.startJob());
SegmentIdentifier identifier = new SegmentIdentifier("dataSourceName", new Interval("2000/2001"), "A", new LinearShardSpec(0));
Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
appenderator.add(identifier, AppenderatorTest.IR("2000", "bar", 1), Suppliers.ofInstance(Committers.nil()));
Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory());
appenderator.add(identifier, AppenderatorTest.IR("2000", "baz", 1), Suppliers.ofInstance(Committers.nil()));
Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory());
appenderator.close();
Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
}
}
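For reference, a sketch of how the two aggregators in the DataSchema above are conventionally written in a JSON metricsSpec; this JSON is not taken from the test, it follows the documented "count" and "longSum" aggregator forms:
// Illustrative only: JSON equivalent of the AggregatorFactory[] passed to DataSchema above.
final String metricsSpecJson =
    "[{\"type\":\"count\",\"name\":\"count\"},"
    + " {\"type\":\"longSum\",\"name\":\"met\",\"fieldName\":\"met\"}]";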
Use of io.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class SinkTest, method testSwap:
@Test
public void testSwap() throws Exception {
final DataSchema schema = new DataSchema(
    "test",
    null,
    new AggregatorFactory[] { new CountAggregatorFactory("rows") },
    new UniformGranularitySpec(Granularities.HOUR, Granularities.MINUTE, null),
    new DefaultObjectMapper()
);
final Interval interval = new Interval("2013-01-01/2013-01-02");
final String version = new DateTime().toString();
RealtimeTuningConfig tuningConfig = new RealtimeTuningConfig(
    100, new Period("P1Y"), null, null,
    null, null, null, null, null, null, 0, 0, null, null
);
final Sink sink = new Sink(
    interval,
    schema,
    tuningConfig.getShardSpec(),
    version,
    tuningConfig.getMaxRowsInMemory(),
    tuningConfig.isReportParseExceptions()
);
sink.add(new InputRow() {
  @Override
  public List<String> getDimensions() {
    return Lists.newArrayList();
  }

  @Override
  public long getTimestampFromEpoch() {
    return new DateTime("2013-01-01").getMillis();
  }

  @Override
  public DateTime getTimestamp() {
    return new DateTime("2013-01-01");
  }

  @Override
  public List<String> getDimension(String dimension) {
    return Lists.newArrayList();
  }

  @Override
  public float getFloatMetric(String metric) {
    return 0;
  }

  @Override
  public long getLongMetric(String metric) {
    return 0L;
  }

  @Override
  public Object getRaw(String dimension) {
    return null;
  }

  @Override
  public int compareTo(Row o) {
    return 0;
  }
});
FireHydrant currHydrant = sink.getCurrHydrant();
Assert.assertEquals(new Interval("2013-01-01/PT1M"), currHydrant.getIndex().getInterval());
FireHydrant swapHydrant = sink.swap();
sink.add(new InputRow() {
  @Override
  public List<String> getDimensions() {
    return Lists.newArrayList();
  }

  @Override
  public long getTimestampFromEpoch() {
    return new DateTime("2013-01-01").getMillis();
  }

  @Override
  public DateTime getTimestamp() {
    return new DateTime("2013-01-01");
  }

  @Override
  public List<String> getDimension(String dimension) {
    return Lists.newArrayList();
  }

  @Override
  public float getFloatMetric(String metric) {
    return 0;
  }

  @Override
  public long getLongMetric(String metric) {
    return 0L;
  }

  @Override
  public Object getRaw(String dimension) {
    return null;
  }

  @Override
  public int compareTo(Row o) {
    return 0;
  }
});
Assert.assertEquals(currHydrant, swapHydrant);
Assert.assertNotSame(currHydrant, sink.getCurrHydrant());
Assert.assertEquals(new Interval("2013-01-01/PT1M"), sink.getCurrHydrant().getIndex().getInterval());
Assert.assertEquals(2, Iterators.size(sink.iterator()));
}
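Both sink.add calls above construct an identical anonymous InputRow: an empty row (no dimensions, no metric values) stamped 2013-01-01. Purely as a sketch, and not part of the original test, the duplication could be factored into a small helper built on MapBasedInputRow; the name emptyRowAt is hypothetical:
// Hypothetical helper, not in the original test. Assumes io.druid.data.input.MapBasedInputRow
// and com.google.common.collect.ImmutableMap are imported; an empty event map behaves like the
// anonymous InputRow above for the "rows" count aggregator, which reads no metric values.
private static InputRow emptyRowAt(String timestamp) {
  return new MapBasedInputRow(
      new DateTime(timestamp),
      Lists.<String>newArrayList(),
      ImmutableMap.<String, Object>of()
  );
}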